diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e045170561d..3dcce68ab46 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -59,6 +59,9 @@ At a minimum, the following information should be added (but add more as needed) - [ ] Exclude: All with TSAN, MSAN, UBSAN, Coverage - [ ] Exclude: All with aarch64, release, debug --- +- [ ] Run only fuzzers related jobs (libFuzzer fuzzers, AST fuzzers, etc.) +- [ ] Exclude: AST fuzzers +--- - [ ] Do not test - [ ] Woolen Wolfdog - [ ] Upload binaries for special builds diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index d4993b373df..eb16c25f604 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -101,6 +101,7 @@ jobs: --volume=".:/wd" --workdir="/wd" \ clickhouse/style-test \ ./tests/ci/changelog.py -v --debug-helpers \ + --gh-user-or-token ${{ secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN }} \ --jobs=5 \ --output="./docs/changelogs/${{ env.RELEASE_TAG }}.md" ${{ env.RELEASE_TAG }} git add ./docs/changelogs/${{ env.RELEASE_TAG }}.md @@ -129,9 +130,9 @@ jobs: if: ${{ inputs.type == 'patch' && ! inputs.only-repo }} shell: bash run: | - python3 ./tests/ci/create_release.py --set-progress-completed git reset --hard HEAD git checkout "$GITHUB_REF_NAME" + python3 ./tests/ci/create_release.py --set-progress-completed - name: Create GH Release if: ${{ inputs.type == 'patch' && ! inputs.only-repo }} shell: bash diff --git a/.gitmodules b/.gitmodules index 7fdfb1103c5..cdee6a43ad8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -230,9 +230,6 @@ [submodule "contrib/minizip-ng"] path = contrib/minizip-ng url = https://github.com/zlib-ng/minizip-ng -[submodule "contrib/annoy"] - path = contrib/annoy - url = https://github.com/ClickHouse/annoy [submodule "contrib/qpl"] path = contrib/qpl url = https://github.com/intel/qpl @@ -348,9 +345,6 @@ [submodule "contrib/FP16"] path = contrib/FP16 url = https://github.com/Maratyszcza/FP16.git -[submodule "contrib/robin-map"] - path = contrib/robin-map - url = https://github.com/Tessil/robin-map.git [submodule "contrib/aklomp-base64"] path = contrib/aklomp-base64 url = https://github.com/aklomp/base64.git diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b4e0484ab1..6fa91fa002f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -187,14 +187,6 @@ else () set(NO_WHOLE_ARCHIVE --no-whole-archive) endif () -if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE") - # Can be lld or ld-lld or lld-13 or /path/to/lld. 
- if (LINKER_NAME MATCHES "lld") - set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index") - message (STATUS "Adding .gdb-index via --gdb-index linker option.") - endif () -endif() - if (NOT (SANITIZE_COVERAGE OR WITH_COVERAGE) AND (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" @@ -330,17 +322,21 @@ if (DISABLE_OMIT_FRAME_POINTER) set (CMAKE_ASM_FLAGS_ADD "${CMAKE_ASM_FLAGS_ADD} -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer") endif() +# Before you start hating your debugger because it refuses to show variables (''), try building with -DDEBUG_O_LEVEL="0" +# https://stackoverflow.com/questions/63386189/whats-the-difference-between-a-compilers-o0-option-and-og-option/63386263#63386263 +set(DEBUG_O_LEVEL "g" CACHE STRING "The -Ox level used for debug builds") + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") -set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") +set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}") set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${COMPILER_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") -set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") +set (CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_C_FLAGS_ADD}") set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") -set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -Og ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") +set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}") if (OS_DARWIN) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") @@ -402,7 +398,7 @@ if ((NOT OS_LINUX AND NOT OS_ANDROID) OR (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") set(ENABLE_GWP_ASAN OFF) endif () -option (ENABLE_FIU "Enable Fiu" ON) +option (ENABLE_LIBFIU "Enable libfiu" ON) option(WERROR "Enable -Werror compiler option" ON) diff --git a/base/base/CMakeLists.txt b/base/base/CMakeLists.txt index 341c92d3042..247028b96e0 100644 --- a/base/base/CMakeLists.txt +++ b/base/base/CMakeLists.txt @@ -1,4 +1,4 @@ -add_compile_options($<$,$>:${COVERAGE_FLAGS}>) +add_compile_options("$<$,$>:${COVERAGE_FLAGS}>") if (USE_CLANG_TIDY) set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") diff --git a/base/base/cgroupsv2.cpp b/base/base/cgroupsv2.cpp index 87f62bf377d..e0e37c8729b 100644 --- a/base/base/cgroupsv2.cpp +++ b/base/base/cgroupsv2.cpp @@ -27,27 +27,6 @@ bool cgroupsV2Enabled() #endif } -bool cgroupsV2MemoryControllerEnabled() -{ -#if defined(OS_LINUX) - chassert(cgroupsV2Enabled()); - /// According to https://docs.kernel.org/admin-guide/cgroup-v2.html, file "cgroup.controllers" defines which controllers are available - /// for the current + child cgroups. The set of available controllers can be restricted from level to level using file - /// "cgroups.subtree_control". It is therefore sufficient to check the bottom-most nested "cgroup.controllers" file. 
- fs::path cgroup_dir = cgroupV2PathOfProcess(); - if (cgroup_dir.empty()) - return false; - std::ifstream controllers_file(cgroup_dir / "cgroup.controllers"); - if (!controllers_file.is_open()) - return false; - std::string controllers; - std::getline(controllers_file, controllers); - return controllers.find("memory") != std::string::npos; -#else - return false; -#endif -} - fs::path cgroupV2PathOfProcess() { #if defined(OS_LINUX) @@ -71,3 +50,28 @@ fs::path cgroupV2PathOfProcess() return {}; #endif } + +std::optional getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name) +{ +#if defined(OS_LINUX) + if (!cgroupsV2Enabled()) + return {}; + + fs::path current_cgroup = cgroupV2PathOfProcess(); + if (current_cgroup.empty()) + return {}; + + /// Return the bottom-most nested file. If there is no such file at the current + /// level, try again at the parent level as settings are inherited. + while (current_cgroup != default_cgroups_mount.parent_path()) + { + const auto path = current_cgroup / file_name; + if (fs::exists(path)) + return {current_cgroup}; + current_cgroup = current_cgroup.parent_path(); + } + return {}; +#else + return {}; +#endif +} diff --git a/base/base/cgroupsv2.h b/base/base/cgroupsv2.h index cfb916ff358..a6276474254 100644 --- a/base/base/cgroupsv2.h +++ b/base/base/cgroupsv2.h @@ -1,6 +1,7 @@ #pragma once #include +#include #if defined(OS_LINUX) /// I think it is possible to mount the cgroups hierarchy somewhere else (e.g. when in containers). @@ -11,11 +12,11 @@ static inline const std::filesystem::path default_cgroups_mount = "/sys/fs/cgrou /// Is cgroups v2 enabled on the system? bool cgroupsV2Enabled(); -/// Is the memory controller of cgroups v2 enabled on the system? -/// Assumes that cgroupsV2Enabled() is enabled. -bool cgroupsV2MemoryControllerEnabled(); - /// Detects which cgroup v2 the process belongs to and returns the filesystem path to the cgroup. /// Returns an empty path the cgroup cannot be determined. /// Assumes that cgroupsV2Enabled() is enabled. std::filesystem::path cgroupV2PathOfProcess(); + +/// Returns the most nested cgroup dir containing the specified file. +/// If cgroups v2 is not enabled - returns an empty optional. 
+std::optional getCgroupsV2PathContainingFile([[maybe_unused]] std::string_view file_name); diff --git a/base/base/getMemoryAmount.cpp b/base/base/getMemoryAmount.cpp index 03aab1eac72..bbfbecdbffd 100644 --- a/base/base/getMemoryAmount.cpp +++ b/base/base/getMemoryAmount.cpp @@ -19,9 +19,6 @@ std::optional getCgroupsV2MemoryLimit() if (!cgroupsV2Enabled()) return {}; - if (!cgroupsV2MemoryControllerEnabled()) - return {}; - std::filesystem::path current_cgroup = cgroupV2PathOfProcess(); if (current_cgroup.empty()) return {}; diff --git a/base/poco/Net/include/Poco/Net/HTTPServerSession.h b/base/poco/Net/include/Poco/Net/HTTPServerSession.h index 3df7995509a..b0659ca405c 100644 --- a/base/poco/Net/include/Poco/Net/HTTPServerSession.h +++ b/base/poco/Net/include/Poco/Net/HTTPServerSession.h @@ -58,6 +58,10 @@ namespace Net void setKeepAliveTimeout(Poco::Timespan keepAliveTimeout); + size_t getKeepAliveTimeout() const { return _keepAliveTimeout.totalSeconds(); } + + size_t getMaxKeepAliveRequests() const { return _maxKeepAliveRequests; } + private: bool _firstRequest; Poco::Timespan _keepAliveTimeout; diff --git a/base/poco/Net/src/HTTPServerSession.cpp b/base/poco/Net/src/HTTPServerSession.cpp index f67a63a9e0e..8eec3e14872 100644 --- a/base/poco/Net/src/HTTPServerSession.cpp +++ b/base/poco/Net/src/HTTPServerSession.cpp @@ -19,11 +19,11 @@ namespace Poco { namespace Net { -HTTPServerSession::HTTPServerSession(const StreamSocket& socket, HTTPServerParams::Ptr pParams): - HTTPSession(socket, pParams->getKeepAlive()), - _firstRequest(true), - _keepAliveTimeout(pParams->getKeepAliveTimeout()), - _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) +HTTPServerSession::HTTPServerSession(const StreamSocket & socket, HTTPServerParams::Ptr pParams) + : HTTPSession(socket, pParams->getKeepAlive()) + , _firstRequest(true) + , _keepAliveTimeout(pParams->getKeepAliveTimeout()) + , _maxKeepAliveRequests(pParams->getMaxKeepAliveRequests()) { setTimeout(pParams->getTimeout()); } @@ -52,11 +52,12 @@ bool HTTPServerSession::hasMoreRequests() } else if (_maxKeepAliveRequests != 0 && getKeepAlive()) { - if (_maxKeepAliveRequests > 0) - --_maxKeepAliveRequests; - return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ); - } - else return false; + if (_maxKeepAliveRequests > 0) + --_maxKeepAliveRequests; + return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ); + } + else + return false; } diff --git a/cmake/autogenerated_versions.txt b/cmake/autogenerated_versions.txt index d69646d3694..c82038804fe 100644 --- a/cmake/autogenerated_versions.txt +++ b/cmake/autogenerated_versions.txt @@ -2,11 +2,11 @@ # NOTE: VERSION_REVISION has nothing common with DBMS_TCP_PROTOCOL_VERSION, # only DBMS_TCP_PROTOCOL_VERSION should be incremented on protocol changes. 
-SET(VERSION_REVISION 54489) +SET(VERSION_REVISION 54490) SET(VERSION_MAJOR 24) -SET(VERSION_MINOR 8) +SET(VERSION_MINOR 9) SET(VERSION_PATCH 1) -SET(VERSION_GITHASH 3f8b27d7accd2b5ec4afe7d0dd459115323304af) -SET(VERSION_DESCRIBE v24.8.1.1-testing) -SET(VERSION_STRING 24.8.1.1) +SET(VERSION_GITHASH e02b434d2fc0c4fbee29ca675deab7474d274608) +SET(VERSION_DESCRIBE v24.9.1.1-testing) +SET(VERSION_STRING 24.9.1.1) # end of autochange diff --git a/cmake/sanitize.cmake b/cmake/sanitize.cmake index d8d211b9329..091e20e1387 100644 --- a/cmake/sanitize.cmake +++ b/cmake/sanitize.cmake @@ -57,8 +57,8 @@ option(WITH_COVERAGE "Instrumentation for code coverage with default implementat if (WITH_COVERAGE) message (STATUS "Enabled instrumentation for code coverage") - set(COVERAGE_FLAGS "SHELL:-fprofile-instr-generate -fcoverage-mapping") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") + set (COVERAGE_FLAGS -fprofile-instr-generate -fcoverage-mapping) + set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-instr-generate -fcoverage-mapping") endif() option (SANITIZE_COVERAGE "Instrumentation for code coverage with custom callbacks" OFF) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index eb3afe0ccdf..d7489bc5c0e 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -179,7 +179,7 @@ else() message(STATUS "Not using QPL") endif () -if (OS_LINUX AND ARCH_AMD64) +if (OS_LINUX AND ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER) option (ENABLE_QATLIB "Enable Intel® QuickAssist Technology Library (QATlib)" ${ENABLE_LIBRARIES}) elseif(ENABLE_QATLIB) message (${RECONFIGURE_MESSAGE_LEVEL} "QATLib is only supported on x86_64") @@ -205,14 +205,12 @@ add_contrib (morton-nd-cmake morton-nd) if (ARCH_S390X) add_contrib(crc32-s390x-cmake crc32-s390x) endif() -add_contrib (annoy-cmake annoy) -option(ENABLE_USEARCH "Enable USearch (Approximate Neighborhood Search, HNSW) support" ${ENABLE_LIBRARIES}) +option(ENABLE_USEARCH "Enable USearch" ${ENABLE_LIBRARIES}) if (ENABLE_USEARCH) add_contrib (FP16-cmake FP16) - add_contrib (robin-map-cmake robin-map) add_contrib (SimSIMD-cmake SimSIMD) - add_contrib (usearch-cmake usearch) # requires: FP16, robin-map, SimdSIMD + add_contrib (usearch-cmake usearch) # requires: FP16, SimdSIMD else () message(STATUS "Not using USearch") endif () diff --git a/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt b/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt index 72d21a8572b..fc18092f574 100644 --- a/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt +++ b/contrib/QAT-ZSTD-Plugin-cmake/CMakeLists.txt @@ -27,7 +27,7 @@ if (ENABLE_QAT_OUT_OF_TREE_BUILD) ${QAT_AL_INCLUDE_DIR} ${QAT_USDM_INCLUDE_DIR} ${ZSTD_LIBRARY_DIR}) - target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC) + target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0) add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin) else () # In-tree build message(STATUS "Intel QATZSTD in-tree build") @@ -78,7 +78,7 @@ else () # In-tree build ${QAT_USDM_INCLUDE_DIR} ${ZSTD_LIBRARY_DIR} ${LIBQAT_HEADER_DIR}) - target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DENABLE_ZSTD_QAT_CODEC -DINTREE) + target_compile_definitions(_qatzstd_plugin PRIVATE -DDEBUGLEVEL=0 PUBLIC -DINTREE) target_include_directories(_qatzstd_plugin SYSTEM PUBLIC $ $) add_library (ch_contrib::qatzstd_plugin ALIAS _qatzstd_plugin) endif () diff --git a/contrib/SimSIMD b/contrib/SimSIMD index de2cb75b9e9..91a76d1ac51 160000 
--- a/contrib/SimSIMD +++ b/contrib/SimSIMD @@ -1 +1 @@ -Subproject commit de2cb75b9e9e3389d5e1e51fd9f8ed151f3c17cf +Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26 diff --git a/contrib/annoy b/contrib/annoy deleted file mode 160000 index f2ac8e7b48f..00000000000 --- a/contrib/annoy +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f2ac8e7b48f9a9cf676d3b58286e5455aba8e956 diff --git a/contrib/annoy-cmake/CMakeLists.txt b/contrib/annoy-cmake/CMakeLists.txt deleted file mode 100644 index bdef7d92132..00000000000 --- a/contrib/annoy-cmake/CMakeLists.txt +++ /dev/null @@ -1,24 +0,0 @@ -option(ENABLE_ANNOY "Enable Annoy index support" ${ENABLE_LIBRARIES}) - -# Annoy index should be disabled with undefined sanitizer. Because of memory storage optimizations -# (https://github.com/ClickHouse/annoy/blob/9d8a603a4cd252448589e84c9846f94368d5a289/src/annoylib.h#L442-L463) -# UBSan fails and leads to crash. Simmilar issue is already opened in Annoy repo -# https://github.com/spotify/annoy/issues/456 -# Problem with aligment can lead to errors like -# (https://stackoverflow.com/questions/46790550/c-undefined-behavior-strict-aliasing-rule-or-incorrect-alignment) -# or will lead to crash on arm https://developer.arm.com/documentation/ka003038/latest -# This issues should be resolved before annoy became non-experimental (--> setting "allow_experimental_annoy_index") -if ((NOT ENABLE_ANNOY) OR (SANITIZE STREQUAL "undefined") OR (ARCH_AARCH64)) - message (STATUS "Not using annoy") - return() -endif() - -set(ANNOY_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/annoy") -set(ANNOY_SOURCE_DIR "${ANNOY_PROJECT_DIR}/src") - -add_library(_annoy INTERFACE) -target_include_directories(_annoy SYSTEM INTERFACE ${ANNOY_SOURCE_DIR}) - -add_library(ch_contrib::annoy ALIAS _annoy) -target_compile_definitions(_annoy INTERFACE ENABLE_ANNOY) -target_compile_definitions(_annoy INTERFACE ANNOYLIB_MULTITHREADED_BUILD) diff --git a/contrib/libfiu-cmake/CMakeLists.txt b/contrib/libfiu-cmake/CMakeLists.txt index e805491edbb..eab55087c98 100644 --- a/contrib/libfiu-cmake/CMakeLists.txt +++ b/contrib/libfiu-cmake/CMakeLists.txt @@ -1,20 +1,21 @@ -if (NOT ENABLE_FIU) - message (STATUS "Not using fiu") +if (NOT ENABLE_LIBFIU) + message (STATUS "Not using libfiu") return () endif () -set(FIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/") +set(LIBFIU_DIR "${ClickHouse_SOURCE_DIR}/contrib/libfiu/") -set(FIU_SOURCES - ${FIU_DIR}/libfiu/fiu.c - ${FIU_DIR}/libfiu/fiu-rc.c - ${FIU_DIR}/libfiu/backtrace.c - ${FIU_DIR}/libfiu/wtable.c +set(LIBFIU_SOURCES + ${LIBFIU_DIR}/libfiu/fiu.c + ${LIBFIU_DIR}/libfiu/fiu-rc.c + ${LIBFIU_DIR}/libfiu/backtrace.c + ${LIBFIU_DIR}/libfiu/wtable.c ) -set(FIU_HEADERS "${FIU_DIR}/libfiu") +set(LIBFIU_HEADERS "${LIBFIU_DIR}/libfiu") -add_library(_fiu ${FIU_SOURCES}) -target_compile_definitions(_fiu PUBLIC DUMMY_BACKTRACE) -target_include_directories(_fiu PUBLIC ${FIU_HEADERS}) -add_library(ch_contrib::fiu ALIAS _fiu) +add_library(_libfiu ${LIBFIU_SOURCES}) +target_compile_definitions(_libfiu PUBLIC DUMMY_BACKTRACE) +target_compile_definitions(_libfiu PUBLIC FIU_ENABLE) +target_include_directories(_libfiu PUBLIC ${LIBFIU_HEADERS}) +add_library(ch_contrib::libfiu ALIAS _libfiu) diff --git a/contrib/libprotobuf-mutator b/contrib/libprotobuf-mutator index 1f95f808306..b922c8ab900 160000 --- a/contrib/libprotobuf-mutator +++ b/contrib/libprotobuf-mutator @@ -1 +1 @@ -Subproject commit 1f95f8083066f5b38fd2db172e7e7f9aa7c49d2d +Subproject commit b922c8ab9004ef9944982e4f165e2747b13223fa diff --git 
a/contrib/libunwind b/contrib/libunwind index a89d904befe..601db0b0e03 160000 --- a/contrib/libunwind +++ b/contrib/libunwind @@ -1 +1 @@ -Subproject commit a89d904befea07814628c6ce0b44083c4e149c62 +Subproject commit 601db0b0e03018c01710470a37703b618f9cf08b diff --git a/contrib/qpl-cmake/CMakeLists.txt b/contrib/qpl-cmake/CMakeLists.txt index e62612cff5a..89332ae0f7a 100644 --- a/contrib/qpl-cmake/CMakeLists.txt +++ b/contrib/qpl-cmake/CMakeLists.txt @@ -728,10 +728,6 @@ add_library(_qpl STATIC ${LIB_DEPS}) target_include_directories(_qpl PUBLIC $ $) - -target_compile_definitions(_qpl - PUBLIC -DENABLE_QPL_COMPRESSION) - target_link_libraries(_qpl PRIVATE ch_contrib::accel-config) diff --git a/contrib/robin-map b/contrib/robin-map deleted file mode 160000 index 851a59e0e30..00000000000 --- a/contrib/robin-map +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 851a59e0e3063ee0e23089062090a73fd3de482d diff --git a/contrib/robin-map-cmake/CMakeLists.txt b/contrib/robin-map-cmake/CMakeLists.txt deleted file mode 100644 index f82ad705dcc..00000000000 --- a/contrib/robin-map-cmake/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -# See contrib/usearch-cmake/CMakeLists.txt diff --git a/contrib/usearch b/contrib/usearch index 30810452bec..e21a5778a0d 160000 --- a/contrib/usearch +++ b/contrib/usearch @@ -1 +1 @@ -Subproject commit 30810452bec5d3d3aa0931bb5d761e2f09aa6356 +Subproject commit e21a5778a0d4469ddaf38c94b7be0196bb701ee4 diff --git a/contrib/usearch-cmake/CMakeLists.txt b/contrib/usearch-cmake/CMakeLists.txt index 29fbe57106c..83221e3810f 100644 --- a/contrib/usearch-cmake/CMakeLists.txt +++ b/contrib/usearch-cmake/CMakeLists.txt @@ -1,17 +1,12 @@ -set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch") -set(USEARCH_SOURCE_DIR "${USEARCH_PROJECT_DIR}/include") - set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16") -set(ROBIN_MAP_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/robin-map") -set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD-map") +set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD") +set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch") add_library(_usearch INTERFACE) target_include_directories(_usearch SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include - ${ROBIN_MAP_PROJECT_DIR}/include ${SIMSIMD_PROJECT_DIR}/include - ${USEARCH_SOURCE_DIR}) + ${USEARCH_PROJECT_DIR}/include) add_library(ch_contrib::usearch ALIAS _usearch) -target_compile_definitions(_usearch INTERFACE ENABLE_USEARCH) diff --git a/docker/packager/binary-builder/build.sh b/docker/packager/binary-builder/build.sh index bd5f2fe8466..ba9311c6880 100755 --- a/docker/packager/binary-builder/build.sh +++ b/docker/packager/binary-builder/build.sh @@ -108,7 +108,8 @@ if [ -n "$MAKE_DEB" ]; then bash -x /build/packages/build fi -mv ./programs/clickhouse* /output || mv ./programs/*_fuzzer /output +mv ./programs/clickhouse* /output ||: +mv ./programs/*_fuzzer /output ||: [ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output [ -x ./programs/self-extracting/clickhouse-stripped ] && mv ./programs/self-extracting/clickhouse-stripped /output [ -x ./programs/self-extracting/clickhouse-keeper ] && mv ./programs/self-extracting/clickhouse-keeper /output diff --git a/docker/packager/cctools/Dockerfile b/docker/packager/cctools/Dockerfile index d986c6a3c86..570a42d42d5 100644 --- a/docker/packager/cctools/Dockerfile +++ b/docker/packager/cctools/Dockerfile @@ -1,3 +1,5 @@ +# docker build -t clickhouse/cctools . 
+ # This is a hack to significantly reduce the build time of the clickhouse/binary-builder # It's based on the assumption that we don't care of the cctools version so much # It event does not depend on the clickhouse/fasttest in the `docker/images.json` @@ -30,5 +32,29 @@ RUN git clone https://github.com/tpoechtrager/cctools-port.git \ && cd ../.. \ && rm -rf cctools-port +# +# GDB +# +# ld from binutils is 2.38, which has the following error: +# +# DWARF error: invalid or unhandled FORM value: 0x23 +# +ENV LD=ld.lld-${LLVM_VERSION} +ARG GDB_VERSION=15.1 +RUN apt-get update \ + && apt-get install --yes \ + libgmp-dev \ + libmpfr-dev \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* +RUN wget https://sourceware.org/pub/gdb/releases/gdb-$GDB_VERSION.tar.gz \ + && tar -xvf gdb-$GDB_VERSION.tar.gz \ + && cd gdb-$GDB_VERSION \ + && ./configure --prefix=/opt/gdb \ + && make -j $(nproc) \ + && make install \ + && rm -fr gdb-$GDB_VERSION gdb-$GDB_VERSION.tar.gz + FROM scratch COPY --from=builder /cctools /cctools +COPY --from=builder /opt/gdb /opt/gdb diff --git a/docker/test/fasttest/Dockerfile b/docker/test/fasttest/Dockerfile index 4cac2ee6135..5d311c673a4 100644 --- a/docker/test/fasttest/Dockerfile +++ b/docker/test/fasttest/Dockerfile @@ -83,7 +83,7 @@ RUN arch=${TARGETARCH:-amd64} \ # Give suid to gdb to grant it attach permissions # chmod 777 to make the container user independent -RUN chmod u+s /usr/bin/gdb \ +RUN chmod u+s /opt/gdb/bin/gdb \ && mkdir -p /var/lib/clickhouse \ && chmod 777 /var/lib/clickhouse diff --git a/docker/test/integration/base/Dockerfile b/docker/test/integration/base/Dockerfile index 469251f648c..dc4d470a262 100644 --- a/docker/test/integration/base/Dockerfile +++ b/docker/test/integration/base/Dockerfile @@ -11,7 +11,6 @@ RUN apt-get update \ curl \ default-jre \ g++ \ - gdb \ iproute2 \ krb5-user \ libicu-dev \ @@ -73,3 +72,6 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \ ENV TZ=Etc/UTC RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" diff --git a/docker/test/integration/runner/Dockerfile b/docker/test/integration/runner/Dockerfile index d250b746e7d..d62009f1be3 100644 --- a/docker/test/integration/runner/Dockerfile +++ b/docker/test/integration/runner/Dockerfile @@ -30,7 +30,6 @@ RUN apt-get update \ luajit \ libssl-dev \ libcurl4-openssl-dev \ - gdb \ default-jdk \ software-properties-common \ libkrb5-dev \ @@ -87,6 +86,8 @@ COPY modprobe.sh /usr/local/bin/modprobe COPY dockerd-entrypoint.sh /usr/local/bin/ COPY misc/ /misc/ +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" # Same options as in test/base/Dockerfile # (in case you need to override them in tests) diff --git a/docker/test/performance-comparison/Dockerfile b/docker/test/performance-comparison/Dockerfile index c68a39f6f70..f7139275282 100644 --- a/docker/test/performance-comparison/Dockerfile +++ b/docker/test/performance-comparison/Dockerfile @@ -9,7 +9,6 @@ RUN apt-get update \ curl \ dmidecode \ g++ \ - gdb \ git \ gnuplot \ imagemagick \ @@ -42,6 +41,9 @@ RUN pip3 --no-cache-dir install -r requirements.txt COPY run.sh / +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" + CMD ["bash", "/run.sh"] # docker run --network=host --volume :/workspace --volume=:/output -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/performance-comparison diff --git 
a/docker/test/stateless/Dockerfile b/docker/test/stateless/Dockerfile index a0e5513a3a2..d8eb072328f 100644 --- a/docker/test/stateless/Dockerfile +++ b/docker/test/stateless/Dockerfile @@ -69,8 +69,8 @@ ENV MAX_RUN_TIME=0 # Unrelated to vars in setup_minio.sh, but should be the same there # to have the same binaries for local running scenario -ARG MINIO_SERVER_VERSION=2022-01-03T18-22-58Z -ARG MINIO_CLIENT_VERSION=2022-01-05T23-52-51Z +ARG MINIO_SERVER_VERSION=2024-08-03T04-33-23Z +ARG MINIO_CLIENT_VERSION=2024-07-31T15-58-33Z ARG TARGETARCH # Download Minio-related binaries diff --git a/docker/test/stateless/run.sh b/docker/test/stateless/run.sh index 3f734bef6a8..c70cbe1fe45 100755 --- a/docker/test/stateless/run.sh +++ b/docker/test/stateless/run.sh @@ -54,8 +54,6 @@ source /utils.lib /usr/share/clickhouse-test/config/install.sh ./setup_minio.sh stateless -./mc admin trace clickminio > /test_output/minio.log & -MC_ADMIN_PID=$! ./setup_hdfs_minicluster.sh @@ -176,6 +174,55 @@ done setup_logs_replication attach_gdb_to_clickhouse +# create tables for minio log webhooks +clickhouse-client --query "CREATE TABLE minio_audit_logs +( + log String, + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '\"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') +) +ENGINE = MergeTree +ORDER BY tuple()" + +clickhouse-client --query "CREATE TABLE minio_server_logs +( + log String, + event_time DateTime64(9) MATERIALIZED parseDateTime64BestEffortOrZero(trim(BOTH '\"' FROM JSONExtractRaw(log, 'time')), 9, 'UTC') +) +ENGINE = MergeTree +ORDER BY tuple()" + +# create minio log webhooks for both audit and server logs +# use async inserts to avoid creating too many parts +./mc admin config set clickminio logger_webhook:ch_server_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_server_logs%20FORMAT%20LineAsString" queue_size=1000000 batch_size=500 +./mc admin config set clickminio audit_webhook:ch_audit_webhook endpoint="http://localhost:8123/?async_insert=1&wait_for_async_insert=0&async_insert_busy_timeout_min_ms=5000&async_insert_busy_timeout_max_ms=5000&async_insert_max_query_number=1000&async_insert_max_data_size=10485760&query=INSERT%20INTO%20minio_audit_logs%20FORMAT%20LineAsString" queue_size=1000000 batch_size=500 + +max_retries=100 +retry=1 +while [ $retry -le $max_retries ]; do + echo "clickminio restart attempt $retry:" + + output=$(./mc admin service restart clickminio --wait --json 2>&1 | jq -r .status) + echo "Output of restart status: $output" + + expected_output="success +success" + if [ "$output" = "$expected_output" ]; then + echo "Restarted clickminio successfully." + break + fi + + sleep 1 + + retry=$((retry + 1)) +done + +if [ $retry -gt $max_retries ]; then + echo "Failed to restart clickminio after $max_retries attempts." +fi + +./mc admin trace clickminio > /test_output/minio.log & +MC_ADMIN_PID=$! 
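+# The webhook endpoints registered above are plain ClickHouse HTTP calls: URL-decoded, each endpoint runs
+# "INSERT INTO minio_audit_logs FORMAT LineAsString" (respectively minio_server_logs) with the async_insert
+# settings batching rows server-side, so every MinIO log line becomes one String row in the tables created earlier.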
+ function fn_exists() { declare -F "$1" > /dev/null; } @@ -339,6 +386,14 @@ do fi done + +# collect minio audit and server logs +# wait for minio to flush its batch if it has any +sleep 1 +clickhouse-client -q "SYSTEM FLUSH ASYNC INSERT QUEUE" +clickhouse-client -q "SELECT log FROM minio_audit_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_audit_logs.jsonl.zst' FORMAT JSONEachRow" +clickhouse-client -q "SELECT log FROM minio_server_logs ORDER BY event_time INTO OUTFILE '/test_output/minio_server_logs.jsonl.zst' FORMAT JSONEachRow" + # Stop server so we can safely read data with clickhouse-local. # Why do we read data with clickhouse-local? # Because it's the simplest way to read it when server has crashed. diff --git a/docker/test/stateless/setup_minio.sh b/docker/test/stateless/setup_minio.sh index 2b9433edd20..c1508df7e82 100755 --- a/docker/test/stateless/setup_minio.sh +++ b/docker/test/stateless/setup_minio.sh @@ -59,8 +59,8 @@ find_os() { download_minio() { local os local arch - local minio_server_version=${MINIO_SERVER_VERSION:-2022-09-07T22-25-02Z} - local minio_client_version=${MINIO_CLIENT_VERSION:-2022-08-28T20-08-11Z} + local minio_server_version=${MINIO_SERVER_VERSION:-2024-08-03T04-33-23Z} + local minio_client_version=${MINIO_CLIENT_VERSION:-2024-07-31T15-58-33Z} os=$(find_os) arch=$(find_arch) @@ -82,10 +82,10 @@ setup_minio() { local test_type=$1 ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse ./mc admin user add clickminio test testtest - ./mc admin policy set clickminio readwrite user=test + ./mc admin policy attach clickminio readwrite --user=test ./mc mb --ignore-existing clickminio/test if [ "$test_type" = "stateless" ]; then - ./mc policy set public clickminio/test + ./mc anonymous set public clickminio/test fi } @@ -99,10 +99,9 @@ upload_data() { # iterating over globs will cause redundant file variable to be # a path to a file, not a filename # shellcheck disable=SC2045 - for file in $(ls "${data_path}"); do - echo "${file}"; - ./mc cp "${data_path}"/"${file}" clickminio/test/"${file}"; - done + if [ -d "${data_path}" ]; then + ./mc cp --recursive "${data_path}"/ clickminio/test/ + fi } setup_aws_credentials() { @@ -148,4 +147,4 @@ main() { setup_aws_credentials } -main "$@" \ No newline at end of file +main "$@" diff --git a/docker/test/util/Dockerfile b/docker/test/util/Dockerfile index dc928ba7195..8b949ed95db 100644 --- a/docker/test/util/Dockerfile +++ b/docker/test/util/Dockerfile @@ -44,7 +44,6 @@ RUN apt-get update \ bash \ bsdmainutils \ build-essential \ - gdb \ git \ gperf \ moreutils \ @@ -58,3 +57,6 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* COPY process_functional_tests_result.py / + +COPY --from=clickhouse/cctools:0d6b90a7a490 /opt/gdb /opt/gdb +ENV PATH="/opt/gdb/bin:${PATH}" diff --git a/docs/changelogs/v24.3.7.30-lts.md b/docs/changelogs/v24.3.7.30-lts.md new file mode 100644 index 00000000000..f945a54840f --- /dev/null +++ b/docs/changelogs/v24.3.7.30-lts.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 1 +sidebar_label: 2024 +--- + +# 2024 Changelog + +### ClickHouse release v24.3.7.30-lts (c8a28cf4331) FIXME as compared to v24.3.6.48-lts (b2d33c3c45d) + +#### Improvement +* Backported in [#68103](https://github.com/ClickHouse/ClickHouse/issues/68103): Distinguish booleans and integers while parsing values for custom settings: ``` SET custom_a = true; SET custom_b = 1; ```. 
[#62206](https://github.com/ClickHouse/ClickHouse/pull/62206) ([Vitaly Baranov](https://github.com/vitlibar)). + +#### Bug Fix (user-visible misbehavior in an official stable release) +* Backported in [#67931](https://github.com/ClickHouse/ClickHouse/issues/67931): Fixing the `Not-ready Set` error after the `PREWHERE` optimization for StorageMerge. [#65057](https://github.com/ClickHouse/ClickHouse/pull/65057) ([Nikolai Kochetov](https://github.com/KochetovNicolai)). +* Backported in [#68062](https://github.com/ClickHouse/ClickHouse/issues/68062): Fix boolean literals in query sent to external database (for engines like `PostgreSQL`). [#66282](https://github.com/ClickHouse/ClickHouse/pull/66282) ([vdimir](https://github.com/vdimir)). +* Backported in [#67812](https://github.com/ClickHouse/ClickHouse/issues/67812): Only relevant to the experimental Variant data type. Fix crash with Variant + AggregateFunction type. [#67122](https://github.com/ClickHouse/ClickHouse/pull/67122) ([Kruglov Pavel](https://github.com/Avogar)). +* Backported in [#67848](https://github.com/ClickHouse/ClickHouse/issues/67848): Fixes [#66026](https://github.com/ClickHouse/ClickHouse/issues/66026). Avoid unresolved table function arguments traversal in `ReplaceTableNodeToDummyVisitor`. [#67522](https://github.com/ClickHouse/ClickHouse/pull/67522) ([Dmitry Novik](https://github.com/novikd)). +* Backported in [#68271](https://github.com/ClickHouse/ClickHouse/issues/68271): Fix inserting into stream like engines (Kafka, RabbitMQ, NATS) through HTTP interface. [#67554](https://github.com/ClickHouse/ClickHouse/pull/67554) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#67806](https://github.com/ClickHouse/ClickHouse/issues/67806): Fix reloading SQL UDFs with UNION. Previously, restarting the server could make UDF invalid. [#67665](https://github.com/ClickHouse/ClickHouse/pull/67665) ([Antonio Andelic](https://github.com/antonio2368)). +* Backported in [#67834](https://github.com/ClickHouse/ClickHouse/issues/67834): Fix potential stack overflow in `JSONMergePatch` function. Renamed this function from `jsonMergePatch` to `JSONMergePatch` because the previous name was wrong. The previous name is still kept for compatibility. Improved diagnostic of errors in the function. This closes [#67304](https://github.com/ClickHouse/ClickHouse/issues/67304). [#67756](https://github.com/ClickHouse/ClickHouse/pull/67756) ([Alexey Milovidov](https://github.com/alexey-milovidov)). +* Backported in [#68206](https://github.com/ClickHouse/ClickHouse/issues/68206): Fix wrong `count()` result when there is non-deterministic function in predicate. [#67922](https://github.com/ClickHouse/ClickHouse/pull/67922) ([János Benjamin Antal](https://github.com/antaljanosbenjamin)). +* Backported in [#68089](https://github.com/ClickHouse/ClickHouse/issues/68089): Fixed the calculation of the maximum thread soft limit in containerized environments where the usable CPU count is limited. [#67963](https://github.com/ClickHouse/ClickHouse/pull/67963) ([Robert Schulze](https://github.com/rschu1ze)). +* Backported in [#68120](https://github.com/ClickHouse/ClickHouse/issues/68120): Fixed skipping of untouched parts in mutations with new analyzer. Previously with enabled analyzer data in part could be rewritten by mutation even if mutation doesn't affect this part according to predicate. [#68052](https://github.com/ClickHouse/ClickHouse/pull/68052) ([Anton Popov](https://github.com/CurtizJ)). 
+ +#### NOT FOR CHANGELOG / INSIGNIFICANT + +* Update version after release. [#67676](https://github.com/ClickHouse/ClickHouse/pull/67676) ([robot-clickhouse](https://github.com/robot-clickhouse)). +* Backported in [#68074](https://github.com/ClickHouse/ClickHouse/issues/68074): Add an explicit error for `ALTER MODIFY SQL SECURITY` on non-view tables. [#67953](https://github.com/ClickHouse/ClickHouse/pull/67953) ([pufit](https://github.com/pufit)). + diff --git a/docs/en/development/tests.md b/docs/en/development/tests.md index 269995a1a96..6cb36e2049b 100644 --- a/docs/en/development/tests.md +++ b/docs/en/development/tests.md @@ -14,7 +14,7 @@ Each functional test sends one or multiple queries to the running ClickHouse ser Tests are located in `queries` directory. There are two subdirectories: `stateless` and `stateful`. Stateless tests run queries without any preloaded test data - they often create small synthetic datasets on the fly, within the test itself. Stateful tests require preloaded test data from ClickHouse and it is available to general public. -Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client --multiquery`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. +Each test can be one of two types: `.sql` and `.sh`. `.sql` test is the simple SQL script that is piped to `clickhouse-client`. `.sh` test is a script that is run by itself. SQL tests are generally preferable to `.sh` tests. You should use `.sh` tests only when you have to test some feature that cannot be exercised from pure SQL, such as piping some input data into `clickhouse-client` or testing `clickhouse-local`. :::note A common mistake when testing data types `DateTime` and `DateTime64` is assuming that the server uses a specific time zone (e.g. "UTC"). This is not the case, time zones in CI test runs @@ -38,7 +38,7 @@ For more options, see `tests/clickhouse-test --help`. You can simply run all tes ### Adding a New Test -To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client --multiquery < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. +To add new test, create a `.sql` or `.sh` file in `queries/0_stateless` directory, check it manually and then generate `.reference` file in the following way: `clickhouse-client < 00000_test.sql > 00000_test.reference` or `./00000_test.sh > ./00000_test.reference`. Tests should use (create, drop, etc) only tables in `test` database that is assumed to be created beforehand; also tests can use temporary tables. diff --git a/docs/en/engines/table-engines/mergetree-family/annindexes.md b/docs/en/engines/table-engines/mergetree-family/annindexes.md index 5a81313f62e..097b0f5850a 100644 --- a/docs/en/engines/table-engines/mergetree-family/annindexes.md +++ b/docs/en/engines/table-engines/mergetree-family/annindexes.md @@ -17,7 +17,7 @@ In terms of SQL, the nearest neighborhood problem can be expressed as follows: ``` sql SELECT * -FROM table_with_ann_index +FROM table ORDER BY Distance(vectors, Point) LIMIT N ``` @@ -27,75 +27,111 @@ Function `Distance` computes the distance between two vectors. 
Often, the Euclid distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17, 0.33, ...)`, and `N` limits the number of search results. -An alternative formulation of the nearest neighborhood search problem looks as follows: +This query returns the top-`N` closest points to the reference point. Parameter `N` limits the number of returned values which is useful for +situations where `MaxDistance` is difficult to determine in advance. -``` sql -SELECT * -FROM table_with_ann_index -WHERE Distance(vectors, Point) < MaxDistance -LIMIT N -``` - -While the first query returns the top-`N` closest points to the reference point, the second query returns all points closer to the reference -point than a maximally allowed radius `MaxDistance`. Parameter `N` limits the number of returned values which is useful for situations where -`MaxDistance` is difficult to determine in advance. - -With brute force search, both queries are expensive (linear in the number of points) because the distance between all points in `vectors` and +With brute force search, the query is expensive (linear in the number of points) because the distance between all points in `vectors` and `Point` must be computed. To speed this process up, Approximate Nearest Neighbor Search Indexes (ANN indexes) store a compact representation of the search space (using clustering, search trees, etc.) which allows to compute an approximate answer much quicker (in sub-linear time). -# Creating and Using ANN Indexes {#creating_using_ann_indexes} +# Creating and Using Vector Similarity Indexes -Syntax to create an ANN index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column: +Syntax to create a vector similarity index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column: ```sql -CREATE TABLE table_with_ann_index +CREATE TABLE table ( - `id` Int64, - `vectors` Array(Float32), - INDEX [ann_index_name vectors TYPE [ann_index_type]([ann_index_parameters]) [GRANULARITY [N]] + id Int64, + vectors Array(Float32), + INDEX index_name vectors TYPE vector_similarity(method, distance_function[, quantization, connectivity, expansion_add, expansion_search]) [GRANULARITY N] ) ENGINE = MergeTree ORDER BY id; ``` +Parameters: +- `method`: Supports currently only `hnsw`. +- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a + line between two points in Euclidean space), or `cosineDistance` (the [cosine + distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors). +- `quantization`: either `f32`, `f16`, or `i8` for storing the vector with reduced precision (optional, default: `f32`) +- `m`: the number of neighbors per graph node (optional, default: 16) +- `ef_construction`: (optional, default: 128) +- `ef_search`: (optional, default: 64) + +Value 0 for parameters `m`, `ef_construction`, and `ef_search` refers to the default value. 
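+
+For illustration, a hypothetical definition that passes every optional parameter explicitly (assuming the positional order `method, distance_function, quantization, m, ef_construction, ef_search` implied by the syntax above; the values shown are arbitrary tuning choices, not recommendations):
+
+```sql
+CREATE TABLE table
+(
+    id Int64,
+    vectors Array(Float32),
+    INDEX idx vectors TYPE vector_similarity('hnsw', 'cosineDistance', 'f16', 64, 256, 256)
+)
+ENGINE = MergeTree
+ORDER BY id;
+```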
+ +Example: + +```sql +CREATE TABLE table +( + id Int64, + vectors Array(Float32), + INDEX idx vectors TYPE vector_similarity('hnsw', 'L2Distance') -- Alternative syntax: TYPE vector_similarity(hnsw, L2Distance) +) +ENGINE = MergeTree +ORDER BY id; +``` + +Vector similarity indexes are based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW +algorithm](https://arxiv.org/abs/1603.09320), i.e., a hierarchical graph where each point represents a vector and the edges represent +similarity. Such hierarchical structures can be very efficient on large collections. They may often fetch 0.05% or less data from the +overall dataset, while still providing 99% recall. This is especially useful when working with high-dimensional vectors, that are expensive +to load and compare. The library also has several hardware-specific SIMD optimizations to accelerate further distance computations on modern +Arm (NEON and SVE) and x86 (AVX2 and AVX-512) CPUs and OS-specific optimizations to allow efficient navigation around immutable persistent +files, without loading them into RAM. + +USearch indexes are currently experimental, to use them you first need to `SET allow_experimental_vector_similarity_index = 1`. + +Vector similarity indexes currently support two distance functions: +- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space + ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)). +- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors + ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). + +Vector similarity indexes allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`. +If no scalar kind was specified during index creation, `f16` is used as default. + +For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no +distance function was specified during index creation, `L2Distance` is used as default. + +:::note +All arrays must have same length. To avoid errors, you can use a +[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT constraint_name_1 CHECK +length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default values) are not supported. +::: + +:::note +The vector similarity index currently does not work with per-table, non-default `index_granularity` settings (see +[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml. +::: + ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary tables. ANNIndexes are ideally used only with immutable or rarely changed data, respectively when are far more read requests than write requests. -ANN indexes support two types of queries: - -- ORDER BY queries: +ANN indexes support these queries: ``` sql SELECT * - FROM table_with_ann_index + FROM table [WHERE ...] ORDER BY Distance(vectors, Point) LIMIT N ``` -- WHERE queries: - - ``` sql - SELECT * - FROM table_with_ann_index - WHERE Distance(vectors, Point) < MaxDistance - LIMIT N - ``` - :::tip To avoid writing out large vectors, you can use [query parameters](/docs/en/interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters), e.g. 
```bash -clickhouse-client --param_vec='hello' --query="SELECT * FROM table_with_ann_index WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0" +clickhouse-client --param_vec='hello' --query="SELECT * FROM table WHERE L2Distance(vectors, {vec: Array(Float32)}) < 1.0" ``` ::: -**Restrictions**: Queries that contain both a `WHERE Distance(vectors, Point) < MaxDistance` and an `ORDER BY Distance(vectors, Point)` -clause cannot use ANN indexes. Also, the approximate algorithms used to determine the nearest neighbors require a limit, hence queries -without `LIMIT` clause cannot utilize ANN indexes. Also, ANN indexes are only used if the query has a `LIMIT` value smaller than setting +**Restrictions**: Approximate algorithms used to determine the nearest neighbors require a limit, hence queries without `LIMIT` clause +cannot utilize ANN indexes. Also, ANN indexes are only used if the query has a `LIMIT` value smaller than setting `max_limit_for_ann_queries` (default: 1 million rows). This is a safeguard to prevent large memory allocations by external libraries for approximate neighbor search. @@ -122,128 +158,3 @@ brute-force distance calculation over all rows of the granules. With a small `GR equally good, only the processing performance differs. It is generally recommended to use a large `GRANULARITY` for ANN indexes and fall back to a smaller `GRANULARITY` values only in case of problems like excessive memory consumption of the ANN structures. If no `GRANULARITY` was specified for ANN indexes, the default value is 100 million. - - -# Available ANN Indexes {#available_ann_indexes} - -- [Annoy](/docs/en/engines/table-engines/mergetree-family/annindexes.md#annoy-annoy) - -- [USearch](/docs/en/engines/table-engines/mergetree-family/annindexes.md#usearch-usearch) - -## Annoy {#annoy} - -Annoy indexes are currently experimental, to use them you first need to `SET allow_experimental_annoy_index = 1`. They are also currently -disabled on ARM due to memory safety problems with the algorithm. - -This type of ANN index is based on the [Annoy library](https://github.com/spotify/annoy) which recursively divides the space into random -linear surfaces (lines in 2D, planes in 3D etc.). - -
- -
- -Syntax to create an Annoy index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column: - -```sql -CREATE TABLE table_with_annoy_index -( - id Int64, - vectors Array(Float32), - INDEX [ann_index_name] vectors TYPE annoy([Distance[, NumTrees]]) [GRANULARITY N] -) -ENGINE = MergeTree -ORDER BY id; -``` - -Annoy currently supports two distance functions: -- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space - ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)). -- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors - ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). - -For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no -distance function was specified during index creation, `L2Distance` is used as default. - -Parameter `NumTrees` is the number of trees which the algorithm creates (default if not specified: 100). Higher values of `NumTree` mean -more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes. - -:::note -All arrays must have same length. To avoid errors, you can use a -[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT constraint_name_1 CHECK -length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default values) are not supported. -::: - -The creation of Annoy indexes (whenever a new part is build, e.g. at the end of a merge) is a relatively slow process. You can increase -setting `max_threads_for_annoy_index_creation` (default: 4) which controls how many threads are used to create an Annoy index. Please be -careful with this setting, it is possible that multiple indexes are created in parallel in which case there can be overparallelization. - -Setting `annoy_index_search_k_nodes` (default: `NumTrees * LIMIT`) determines how many tree nodes are inspected during SELECTs. Larger -values mean more accurate results at the cost of longer query runtime: - -```sql -SELECT * -FROM table_name -ORDER BY L2Distance(vectors, Point) -LIMIT N -SETTINGS annoy_index_search_k_nodes=100; -``` - -:::note -The Annoy index currently does not work with per-table, non-default `index_granularity` settings (see -[here](https://github.com/ClickHouse/ClickHouse/pull/51325#issuecomment-1605920475)). If necessary, the value must be changed in config.xml. -::: - -## USearch {#usearch} - -This type of ANN index is based on the [USearch library](https://github.com/unum-cloud/usearch), which implements the [HNSW -algorithm](https://arxiv.org/abs/1603.09320), i.e., builds a hierarchical graph where each point represents a vector and the edges represent -similarity. Such hierarchical structures can be very efficient on large collections. They may often fetch 0.05% or less data from the -overall dataset, while still providing 99% recall. This is especially useful when working with high-dimensional vectors, -that are expensive to load and compare. The library also has several hardware-specific SIMD optimizations to accelerate further -distance computations on modern Arm (NEON and SVE) and x86 (AVX2 and AVX-512) CPUs and OS-specific optimizations to allow efficient -navigation around immutable persistent files, without loading them into RAM. - -
- -
- -Syntax to create an USearch index over an [Array](../../../sql-reference/data-types/array.md) column: - -```sql -CREATE TABLE table_with_usearch_index -( - id Int64, - vectors Array(Float32), - INDEX [ann_index_name] vectors TYPE usearch([Distance[, ScalarKind]]) [GRANULARITY N] -) -ENGINE = MergeTree -ORDER BY id; -``` - -USearch currently supports two distance functions: -- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space - ([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)). -- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors - ([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)). - -USearch allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`. If no scalar kind -was specified during index creation, `f16` is used as default. - -For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no -distance function was specified during index creation, `L2Distance` is used as default. diff --git a/docs/en/interfaces/prometheus.md b/docs/en/interfaces/prometheus.md index bf541901b34..8e7023cc51f 100644 --- a/docs/en/interfaces/prometheus.md +++ b/docs/en/interfaces/prometheus.md @@ -75,7 +75,7 @@ Data are received by this protocol and written to a [TimeSeries](/en/engines/tab /write - remote_writeremote_write db_name time_series_table
@@ -105,7 +105,7 @@ Data are read from a [TimeSeries](/en/engines/table-engines/special/time_series) /read - remote_readremote_read db_name time_series_table
@@ -144,14 +144,14 @@ Multiple protocols can be specified together in one place: /write - remote_writeremote_write db_name.time_series_table
/read - remote_readremote_read db_name.time_series_table
diff --git a/docs/en/operations/query-cache.md b/docs/en/operations/query-cache.md index 7a920671fc2..384938e28f6 100644 --- a/docs/en/operations/query-cache.md +++ b/docs/en/operations/query-cache.md @@ -143,6 +143,18 @@ value can be specified at session, profile or query level using setting [query_c Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries). +Sometimes it is useful to keep multiple results for the same query cached. This can be achieved using setting +[query_cache_tag](settings/settings.md#query-cache-tag) that acts as a label (or namespace) for query cache entries. The query cache +considers results of the same query with different tags to be different. + +Example for creating three different query cache entries for the same query: + +```sql +SELECT 1 SETTINGS use_query_cache = true; -- query_cache_tag is implicitly '' (empty string) +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 1'; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'tag 2'; +``` + ClickHouse reads table data in blocks of [max_block_size](settings/settings.md#setting-max_block_size) rows. Due to filtering, aggregation, etc., result blocks are typically much smaller than 'max_block_size' but there are also cases where they are much bigger. Setting [query_cache_squash_partial_results](settings/settings.md#query-cache-squash-partial-results) (enabled by default) controls if result blocks diff --git a/docs/en/operations/server-configuration-parameters/settings.md b/docs/en/operations/server-configuration-parameters/settings.md index 68f61650e00..9fce83a0dc4 100644 --- a/docs/en/operations/server-configuration-parameters/settings.md +++ b/docs/en/operations/server-configuration-parameters/settings.md @@ -1400,6 +1400,16 @@ The number of seconds that ClickHouse waits for incoming requests before closing <keep_alive_timeout>10</keep_alive_timeout> ``` +## max_keep_alive_requests {#max-keep-alive-requests} + +Maximum number of requests that ClickHouse serves through a single keep-alive connection before closing it. Defaults to 10000. + +**Example** + +``` xml +<max_keep_alive_requests>10</max_keep_alive_requests> +``` + ## listen_host {#listen_host} Restriction on hosts that requests can come from. If you want the server to answer all of them, specify `::`. diff --git a/docs/en/operations/settings/merge-tree-settings.md b/docs/en/operations/settings/merge-tree-settings.md index 67fa45c20cd..a3bd919d3ce 100644 --- a/docs/en/operations/settings/merge-tree-settings.md +++ b/docs/en/operations/settings/merge-tree-settings.md @@ -1041,3 +1041,14 @@ Compression rates of LZ4 or ZSTD improve on average by 20-40%. This setting works best for tables with no primary key or a low-cardinality primary key, i.e. a table with only few distinct primary key values. High-cardinality primary keys, e.g. involving timestamp columns of type `DateTime64`, are not expected to benefit from this setting. + +### deduplicate_merge_projection_mode + +Whether to allow creating projections for tables with a non-classic MergeTree engine, that is, other than (Replicated, Shared) MergeTree. If allowed, selects what happens to projections when parts are merged: either drop or rebuild. Classic MergeTree ignores this setting. +It also controls `OPTIMIZE DEDUPLICATE`, in which case it affects all MergeTree family members.
+ +Possible values: + +- throw, drop, rebuild + +Default value: throw \ No newline at end of file diff --git a/docs/en/operations/settings/settings.md b/docs/en/operations/settings/settings.md index e432f4e038f..03ff6067a8f 100644 --- a/docs/en/operations/settings/settings.md +++ b/docs/en/operations/settings/settings.md @@ -1800,6 +1800,17 @@ Possible values: Default value: `0`. +## query_cache_tag {#query-cache-tag} + +A string which acts as a label for [query cache](../query-cache.md) entries. +The same queries with different tags are considered different by the query cache. + +Possible values: + +- Any string + +Default value: `''` + ## query_cache_max_size_in_bytes {#query-cache-max-size-in-bytes} The maximum amount of memory (in bytes) the current user may allocate in the [query cache](../query-cache.md). 0 means unlimited. @@ -5627,6 +5638,12 @@ Disable all insert and mutations (alter table update / alter table delete / alte Default value: `false`. +## use_hive_partitioning + +When enabled, ClickHouse will detect Hive-style partitioning in path (`/name=value/`) in file-like table engines [File](../../engines/table-engines/special/file.md#hive-style-partitioning)/[S3](../../engines/table-engines/integrations/s3.md#hive-style-partitioning)/[URL](../../engines/table-engines/special/url.md#hive-style-partitioning)/[HDFS](../../engines/table-engines/integrations/hdfs.md#hive-style-partitioning)/[AzureBlobStorage](../../engines/table-engines/integrations/azureBlobStorage.md#hive-style-partitioning) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. + +Default value: `false`. + ## allow_experimental_time_series_table {#allow-experimental-time-series-table} Allows creation of tables with the [TimeSeries](../../engines/table-engines/integrations/time-series.md) table engine. diff --git a/docs/en/operations/system-tables/kafka_consumers.md b/docs/en/operations/system-tables/kafka_consumers.md index 7e28a251e26..d58c9f754fd 100644 --- a/docs/en/operations/system-tables/kafka_consumers.md +++ b/docs/en/operations/system-tables/kafka_consumers.md @@ -24,6 +24,7 @@ Columns: - `num_rebalance_revocations`, (UInt64) - number of times the consumer was revoked its partitions - `num_rebalance_assignments`, (UInt64) - number of times the consumer was assigned to Kafka cluster - `is_currently_used`, (UInt8) - consumer is in use +- `last_used`, (UInt64) - last time this consumer was in use, unix time in microseconds - `rdkafka_stat` (String) - library internal statistic. See https://github.com/ClickHouse/librdkafka/blob/master/STATISTICS.md . Set `statistics_interval_ms` to 0 disable, default is 3000 (once in three seconds). Example: diff --git a/docs/en/operations/system-tables/query_cache.md b/docs/en/operations/system-tables/query_cache.md index a9f86f5fc2b..9c48574a329 100644 --- a/docs/en/operations/system-tables/query_cache.md +++ b/docs/en/operations/system-tables/query_cache.md @@ -9,6 +9,7 @@ Columns: - `query` ([String](../../sql-reference/data-types/string.md)) — Query string. - `result_size` ([UInt64](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Size of the query cache entry. +- `tag` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) — Tag of the query cache entry. - `stale` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is stale. 
- `shared` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is shared between multiple users. - `compressed` ([UInt8](../../sql-reference/data-types/int-uint.md)) — If the query cache entry is compressed. @@ -26,6 +27,7 @@ Row 1: ────── query: SELECT 1 SETTINGS use_query_cache = 1 result_size: 128 +tag: stale: 0 shared: 0 compressed: 1 diff --git a/docs/en/sql-reference/data-types/dynamic.md b/docs/en/sql-reference/data-types/dynamic.md index 8be81471377..f9befd166fe 100644 --- a/docs/en/sql-reference/data-types/dynamic.md +++ b/docs/en/sql-reference/data-types/dynamic.md @@ -14,7 +14,7 @@ To declare a column of `Dynamic` type, use the following syntax: Dynamic(max_types=N) ``` -Where `N` is an optional parameter between `1` and `255` indicating how many different data types can be stored inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all new types will be converted to type `String`. Default value of `max_types` is `32`. +Where `N` is an optional parameter between `0` and `254` indicating how many different data types can be stored as separate subcolumns inside a column with type `Dynamic` across single block of data that is stored separately (for example across single data part for MergeTree table). If this limit is exceeded, all values with new types will be stored together in a special shared data structure in binary form. Default value of `max_types` is `32`. :::note The Dynamic data type is an experimental feature. To use it, set `allow_experimental_dynamic_type = 1`. @@ -224,41 +224,43 @@ SELECT d::Dynamic(max_types=5) as d2, dynamicType(d2) FROM test; └───────┴────────────────┘ ``` -If `K < N`, then the values with the rarest types are converted to `String`: +If `K < N`, then the values with the rarest types will be inserted into a single special subcolumn, but still will be accessible: ```text CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory; INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]); -SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2) FROM test; +SELECT d, dynamicType(d), d::Dynamic(max_types=2) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test; ``` ```text -┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┐ -│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ -│ 42 │ Int64 │ 42 │ Int64 │ -│ 43 │ Int64 │ 43 │ Int64 │ -│ 42.42 │ String │ 42.42 │ String │ -│ true │ Bool │ true │ String │ -│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String │ -└─────────┴────────────────┴─────────┴─────────────────┘ +┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─┐ +│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ false │ +│ 42 │ Int64 │ 42 │ Int64 │ false │ +│ 43 │ Int64 │ 43 │ Int64 │ false │ +│ 42.42 │ String │ 42.42 │ String │ false │ +│ true │ Bool │ true │ Bool │ true │ +│ [1,2,3] │ Array(Int64) │ [1,2,3] │ Array(Int64) │ true │ +└─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────┘ ``` -If `K=1`, all types are converted to `String`: +Functions `isDynamicElementInSharedData` returns `true` for rows that are stored in a special shared data structure inside `Dynamic` and as we can see, resulting column contains only 2 types that are not stored in shared data structure. 
+ +If `K=0`, all types will be inserted into single special subcolumn: ```text CREATE TABLE test (d Dynamic(max_types=4)) ENGINE = Memory; INSERT INTO test VALUES (NULL), (42), (43), ('42.42'), (true), ([1, 2, 3]); -SELECT d, dynamicType(d), d::Dynamic(max_types=1) as d2, dynamicType(d2) FROM test; +SELECT d, dynamicType(d), d::Dynamic(max_types=0) as d2, dynamicType(d2), isDynamicElementInSharedData(d2) FROM test; ``` ```text -┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┐ -│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ -│ 42 │ Int64 │ 42 │ String │ -│ 43 │ Int64 │ 43 │ String │ -│ 42.42 │ String │ 42.42 │ String │ -│ true │ Bool │ true │ String │ -│ [1,2,3] │ Array(Int64) │ [1,2,3] │ String │ -└─────────┴────────────────┴─────────┴─────────────────┘ +┌─d───────┬─dynamicType(d)─┬─d2──────┬─dynamicType(d2)─┬─isDynamicElementInSharedData(d2)─┐ +│ ᴺᵁᴸᴸ │ None │ ᴺᵁᴸᴸ │ None │ false │ +│ 42 │ Int64 │ 42 │ Int64 │ true │ +│ 43 │ Int64 │ 43 │ Int64 │ true │ +│ 42.42 │ String │ 42.42 │ String │ true │ +│ true │ Bool │ true │ Bool │ true │ +│ [1,2,3] │ Array(Int64) │ [1,2,3] │ Array(Int64) │ true │ +└─────────┴────────────────┴─────────┴─────────────────┴──────────────────────────────────┘ ``` ## Reading Dynamic type from the data @@ -411,17 +413,17 @@ SELECT d, dynamicType(d) FROM test ORDER by d; ## Reaching the limit in number of different data types stored inside Dynamic -`Dynamic` data type can store only limited number of different data types inside. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 1 and 255 (due to implementation details, it's impossible to have more than 255 different data types inside Dynamic). -When the limit is reached, all new data types inserted to `Dynamic` column will be casted to `String` and stored as `String` values. +`Dynamic` data type can store only limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in type declaration using syntax `Dynamic(max_types=N)` where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types that can be stored as separate subcolumns inside Dynamic). +When the limit is reached, all new data types inserted to `Dynamic` column will be inserted into a single shared data structure that stores values with different data types in binary form. Let's see what happens when the limit is reached in different scenarios. ### Reaching the limit during data parsing -During parsing of `Dynamic` values from the data, when the limit is reached for current block of data, all new values will be inserted as `String` values: +During parsing of `Dynamic` values from the data, when the limit is reached for current block of data, all new values will be inserted into shared data structure: ```sql -SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', ' +SELECT d, dynamicType(d), isDynamicElementInSharedData(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', ' {"d" : 42} {"d" : [1, 2, 3]} {"d" : "Hello, World!"} @@ -432,22 +434,22 @@ SELECT d, dynamicType(d) FROM format(JSONEachRow, 'd Dynamic(max_types=3)', ' ``` ```text -┌─d──────────────────────────┬─dynamicType(d)─┐ -│ 42 │ Int64 │ -│ [1,2,3] │ Array(Int64) │ -│ Hello, World! 
│ String │ -│ 2020-01-01 │ String │ -│ ["str1", "str2", "str3"] │ String │ -│ {"a" : 1, "b" : [1, 2, 3]} │ String │ -└────────────────────────────┴────────────────┘ +┌─d──────────────────────┬─dynamicType(d)─────────────────┬─isDynamicElementInSharedData(d)─┐ +│ 42 │ Int64 │ false │ +│ [1,2,3] │ Array(Int64) │ false │ +│ Hello, World! │ String │ false │ +│ 2020-01-01 │ Date │ true │ +│ ['str1','str2','str3'] │ Array(String) │ true │ +│ (1,[1,2,3]) │ Tuple(a Int64, b Array(Int64)) │ true │ +└────────────────────────┴────────────────────────────────┴─────────────────────────────────┘ ``` -As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String` all new types were converted to `String`. +As we can see, after inserting 3 different data types `Int64`, `Array(Int64)` and `String` all new types were inserted into special shared data structure. ### During merges of data parts in MergeTree table engines -During merge of several data parts in MergeTree table the `Dynamic` column in the resulting data part can reach the limit of different data types inside and won't be able to store all types from source parts. -In this case ClickHouse chooses what types will remain after merge and what types will be casted to `String`. In most cases ClickHouse tries to keep the most frequent types and cast the rarest types to `String`, but it depends on the implementation. +During merge of several data parts in MergeTree table the `Dynamic` column in the resulting data part can reach the limit of different data types that can be stored in separate subcolumns inside and won't be able to store all types as subcolumns from source parts. +In this case ClickHouse chooses what types will remain as separate subcolumns after merge and what types will be inserted into shared data structure. In most cases ClickHouse tries to keep the most frequent types and store the rarest types in shared data structure, but it depends on the implementation. Let's see an example of such merge. 
First, let's create a table with `Dynamic` column, set the limit of different data types to `3` and insert values with `5` different types: @@ -463,17 +465,17 @@ INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(1); Each insert will create a separate data pert with `Dynamic` column containing single type: ```sql -SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; +SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count(); ``` ```text -┌─count()─┬─dynamicType(d)──────┬─_part─────┐ -│ 5 │ UInt64 │ all_1_1_0 │ -│ 4 │ Array(UInt64) │ all_2_2_0 │ -│ 3 │ Date │ all_3_3_0 │ -│ 2 │ Map(UInt64, UInt64) │ all_4_4_0 │ -│ 1 │ String │ all_5_5_0 │ -└─────────┴─────────────────────┴───────────┘ +┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐ +│ 5 │ UInt64 │ false │ all_1_1_0 │ +│ 4 │ Array(UInt64) │ false │ all_2_2_0 │ +│ 3 │ Date │ false │ all_3_3_0 │ +│ 2 │ Map(UInt64, UInt64) │ false │ all_4_4_0 │ +│ 1 │ String │ false │ all_5_5_0 │ +└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘ ``` Now, let's merge all parts into one and see what will happen: @@ -481,18 +483,20 @@ Now, let's merge all parts into one and see what will happen: ```sql SYSTEM START MERGES test; OPTIMIZE TABLE test FINAL; -SELECT count(), dynamicType(d), _part FROM test GROUP BY _part, dynamicType(d) ORDER BY _part; +SELECT count(), dynamicType(d), isDynamicElementInSharedData(d), _part FROM test GROUP BY _part, dynamicType(d), isDynamicElementInSharedData(d) ORDER BY _part, count() desc; ``` ```text -┌─count()─┬─dynamicType(d)─┬─_part─────┐ -│ 5 │ UInt64 │ all_1_5_2 │ -│ 6 │ String │ all_1_5_2 │ -│ 4 │ Array(UInt64) │ all_1_5_2 │ -└─────────┴────────────────┴───────────┘ +┌─count()─┬─dynamicType(d)──────┬─isDynamicElementInSharedData(d)─┬─_part─────┐ +│ 5 │ UInt64 │ false │ all_1_5_2 │ +│ 4 │ Array(UInt64) │ false │ all_1_5_2 │ +│ 3 │ Date │ false │ all_1_5_2 │ +│ 2 │ Map(UInt64, UInt64) │ true │ all_1_5_2 │ +│ 1 │ String │ true │ all_1_5_2 │ +└─────────┴─────────────────────┴─────────────────────────────────┴───────────┘ ``` -As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` and casted all other types to `String`. +As we can see, ClickHouse kept the most frequent types `UInt64` and `Array(UInt64)` as subcolumns and inserted all other types into shared data. 
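To double-check which rows ended up in the shared data structure after the merge, a query along these lines (reusing the same `test` table from the example above) should return only those rows:

```sql
-- Rows whose values were moved to the shared data structure during the merge
SELECT d, dynamicType(d)
FROM test
WHERE isDynamicElementInSharedData(d)
ORDER BY dynamicType(d);
```

Given the merge result shown above, this would return the `Map(UInt64, UInt64)` and `String` rows, while the `UInt64`, `Array(UInt64)` and `Date` values remain stored as ordinary `Dynamic` subcolumns.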
## JSONExtract functions with Dynamic @@ -509,22 +513,23 @@ SELECT JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Dynamic') AS dynamic, dynamicType( ``` ```sql -SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))') AS map_of_dynamics, mapApply((k, v) -> (k, variantType(v)), map_of_dynamics) AS map_of_dynamic_types``` +SELECT JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Dynamic)') AS map_of_dynamics, mapApply((k, v) -> (k, dynamicType(v)), map_of_dynamics) AS map_of_dynamic_types +``` ```text -┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────┐ -│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'UInt32','b':'String','c':'Array(UInt32)'} │ -└──────────────────────────────────┴─────────────────────────────────────────────────┘ +┌─map_of_dynamics──────────────────┬─map_of_dynamic_types────────────────────────────────────┐ +│ {'a':42,'b':'Hello','c':[1,2,3]} │ {'a':'Int64','b':'String','c':'Array(Nullable(Int64))'} │ +└──────────────────────────────────┴─────────────────────────────────────────────────────────┘ ``` ```sql -SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') AS dynamics, arrayMap(x -> (x.1, variantType(x.2)), dynamics) AS dynamic_types``` +SELECT JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Dynamic') AS dynamics, arrayMap(x -> (x.1, dynamicType(x.2)), dynamics) AS dynamic_types``` ``` ```text -┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────┐ -│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','UInt32'),('b','String'),('c','Array(UInt32)')] │ -└────────────────────────────────────────┴───────────────────────────────────────────────────────┘ +┌─dynamics───────────────────────────────┬─dynamic_types─────────────────────────────────────────────────┐ +│ [('a',42),('b','Hello'),('c',[1,2,3])] │ [('a','Int64'),('b','String'),('c','Array(Nullable(Int64))')] │ +└────────────────────────────────────────┴───────────────────────────────────────────────────────────────┘ ``` ### Binary output format diff --git a/docs/en/sql-reference/data-types/geo.md b/docs/en/sql-reference/data-types/geo.md index 7ffc7447d96..8ce53bb2ef2 100644 --- a/docs/en/sql-reference/data-types/geo.md +++ b/docs/en/sql-reference/data-types/geo.md @@ -52,6 +52,48 @@ Result: └───────────────────────────────┴───────────────┘ ``` +## LineString + +`LineString` is a line stored as an array of points: [Array](array.md)([Point](#point)). + +**Example** + +Query: + +```sql +CREATE TABLE geo_linestring (l LineString) ENGINE = Memory(); +INSERT INTO geo_linestring VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]); +SELECT l, toTypeName(l) FROM geo_linestring; +``` +Result: + +``` text +┌─r─────────────────────────────┬─toTypeName(r)─┐ +│ [(0,0),(10,0),(10,10),(0,10)] │ LineString │ +└───────────────────────────────┴───────────────┘ +``` + +## MultiLineString + +`MultiLineString` is multiple lines stored as an array of `LineString`: [Array](array.md)([LineString](#linestring)). 
+ +**Example** + +Query: + +```sql +CREATE TABLE geo_multilinestring (l MultiLineString) ENGINE = Memory(); +INSERT INTO geo_multilinestring VALUES([[(0, 0), (10, 0), (10, 10), (0, 10)], [(1, 1), (2, 2), (3, 3)]]); +SELECT l, toTypeName(l) FROM geo_multilinestring; +``` +Result: + +``` text +┌─l───────────────────────────────────────────────────┬─toTypeName(l)───┐ +│ [[(0,0),(10,0),(10,10),(0,10)],[(1,1),(2,2),(3,3)]] │ MultiLineString │ +└─────────────────────────────────────────────────────┴─────────────────┘ +``` + ## Polygon `Polygon` is a polygon with holes stored as an array of rings: [Array](array.md)([Ring](#ring)). First element of outer array is the outer shape of polygon and all the following elements are holes. diff --git a/docs/en/sql-reference/functions/geo/polygon.md b/docs/en/sql-reference/functions/geo/polygon.md index 25a7a1fac8e..be9e9810626 100644 --- a/docs/en/sql-reference/functions/geo/polygon.md +++ b/docs/en/sql-reference/functions/geo/polygon.md @@ -6,11 +6,13 @@ title: "Functions for Working with Polygons" ## WKT -Returns a WKT (Well Known Text) geometric object from various [Geo Data Types](../../data-types/geo.md). Supported WKT objects are: +Returns a WKT (Well Known Text) geometric object from various [Geo Data Types](../../data-types/geo.md). Supported WKT objects are: - POINT - POLYGON - MULTIPOLYGON +- LINESTRING +- MULTILINESTRING **Syntax** @@ -26,12 +28,16 @@ WKT(geo_data) - [Ring](../../data-types/geo.md#ring) - [Polygon](../../data-types/geo.md#polygon) - [MultiPolygon](../../data-types/geo.md#multipolygon) +- [LineString](../../data-types/geo.md#linestring) +- [MultiLineString](../../data-types/geo.md#multilinestring) **Returned value** - WKT geometric object `POINT` is returned for a Point. - WKT geometric object `POLYGON` is returned for a Polygon -- WKT geometric object `MULTIPOLYGON` is returned for a MultiPolygon. +- WKT geometric object `MULTIPOLYGON` is returned for a MultiPolygon. +- WKT geometric object `LINESTRING` is returned for a LineString. +- WKT geometric object `MULTILINESTRING` is returned for a MultiLineString. **Examples** @@ -84,7 +90,7 @@ SELECT ### Input parameters -String starting with `MULTIPOLYGON` +String starting with `MULTIPOLYGON` ### Returned value @@ -170,6 +176,34 @@ SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); [(1,1),(2,2),(3,3),(1,1)] ``` +## readWKTMultiLineString + +Parses a Well-Known Text (WKT) representation of a MultiLineString geometry and returns it in the internal ClickHouse format. + +### Syntax + +```sql +readWKTMultiLineString(wkt_string) +``` + +### Arguments + +- `wkt_string`: The input WKT string representing a MultiLineString geometry. + +### Returned value + +The function returns a ClickHouse internal representation of the multilinestring geometry. + +### Example + +```sql +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))'); +``` + +```response +[[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] +``` + ## readWKTRing Parses a Well-Known Text (WKT) representation of a Polygon geometry and returns a ring (closed linestring) in the internal ClickHouse format. @@ -219,7 +253,7 @@ UInt8, 0 for false, 1 for true ## polygonsDistanceSpherical -Calculates the minimal distance between two points where one point belongs to the first polygon and the second to another polygon. Spherical means that coordinates are interpreted as coordinates on a pure and ideal sphere, which is not true for the Earth. 
Using this type of coordinate system speeds up execution, but of course is not precise. +Calculates the minimal distance between two points where one point belongs to the first polygon and the second to another polygon. Spherical means that coordinates are interpreted as coordinates on a pure and ideal sphere, which is not true for the Earth. Using this type of coordinate system speeds up execution, but of course is not precise. ### Example diff --git a/docs/en/sql-reference/functions/other-functions.md b/docs/en/sql-reference/functions/other-functions.md index afd779ca8cd..1b50104da52 100644 --- a/docs/en/sql-reference/functions/other-functions.md +++ b/docs/en/sql-reference/functions/other-functions.md @@ -4189,3 +4189,94 @@ Result: │ 32 │ └─────────────────────────────┘ ``` + +## getSubcolumn + +Takes a table expression or identifier and constant string with the name of the sub-column, and returns the requested sub-column extracted from the expression. + +**Syntax** + +```sql +getSubcolumn(col_name, subcol_name) +``` + +**Arguments** + +- `col_name` — Table expression or identifier. [Expression](../syntax.md/#expressions), [Identifier](../syntax.md/#identifiers). +- `subcol_name` — The name of the sub-column. [String](../data-types/string.md). + +**Returned value** + +- Returns the extracted sub-column. + +**Example** + +Query: + +```sql +CREATE TABLE t_arr (arr Array(Tuple(subcolumn1 UInt32, subcolumn2 String))) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t_arr VALUES ([(1, 'Hello'), (2, 'World')]), ([(3, 'This'), (4, 'is'), (5, 'subcolumn')]); +SELECT getSubcolumn(arr, 'subcolumn1'), getSubcolumn(arr, 'subcolumn2') FROM t_arr; +``` + +Result: + +```response + ┌─getSubcolumn(arr, 'subcolumn1')─┬─getSubcolumn(arr, 'subcolumn2')─┐ +1. │ [1,2] │ ['Hello','World'] │ +2. │ [3,4,5] │ ['This','is','subcolumn'] │ + └─────────────────────────────────┴─────────────────────────────────┘ +``` + +## getTypeSerializationStreams + +Enumerates stream paths of a data type. + +:::note +This function is intended for use by developers. +::: + +**Syntax** + +```sql +getTypeSerializationStreams(col) +``` + +**Arguments** + +- `col` — Column or string representation of a data-type from which the data type will be detected. + +**Returned value** + +- Returns an array with all the serialization sub-stream paths.[Array](../data-types/array.md)([String](../data-types/string.md)). + +**Examples** + +Query: + +```sql +SELECT getTypeSerializationStreams(tuple('a', 1, 'b', 2)); +``` + +Result: + +```response + ┌─getTypeSerializationStreams(('a', 1, 'b', 2))─────────────────────────────────────────────────────────────────────────┐ +1. │ ['{TupleElement(1), Regular}','{TupleElement(2), Regular}','{TupleElement(3), Regular}','{TupleElement(4), Regular}'] │ + └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + +Query: + +```sql +SELECT getTypeSerializationStreams('Map(String, Int64)'); +``` + +Result: + +```response + ┌─getTypeSerializationStreams('Map(String, Int64)')────────────────────────────────────────────────────────────────┐ +1. 
│ ['{ArraySizes}','{ArrayElements, TupleElement(keys), Regular}','{ArrayElements, TupleElement(values), Regular}'] │ + └──────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +``` + diff --git a/docs/en/sql-reference/functions/type-conversion-functions.md b/docs/en/sql-reference/functions/type-conversion-functions.md index a2b6e496319..9e4b39e95e4 100644 --- a/docs/en/sql-reference/functions/type-conversion-functions.md +++ b/docs/en/sql-reference/functions/type-conversion-functions.md @@ -64,9 +64,8 @@ toInt8(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -95,7 +94,7 @@ SELECT toInt8(-8), toInt8(-8.8), toInt8('-8') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -156,7 +155,7 @@ Query: SELECT toInt8OrZero('-8'), toInt8OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -216,7 +215,7 @@ Query: SELECT toInt8OrNull('-8'), toInt8OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -251,9 +250,8 @@ toInt8OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int8` is unsuccessful. [Int8](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -281,7 +279,7 @@ Query: SELECT toInt8OrDefault('-8', CAST('-1', 'Int8')), toInt8OrDefault('abc', CAST('-1', 'Int8')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -314,9 +312,8 @@ toInt16(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -345,7 +342,7 @@ SELECT toInt16(-16), toInt16(-16.16), toInt16('-16') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -406,7 +403,7 @@ Query: SELECT toInt16OrZero('-16'), toInt16OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -466,7 +463,7 @@ Query: SELECT toInt16OrNull('-16'), toInt16OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -501,9 +498,8 @@ toInt16OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int16` is unsuccessful. [Int16](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. 
@@ -531,7 +527,7 @@ Query: SELECT toInt16OrDefault('-16', CAST('-1', 'Int16')), toInt16OrDefault('abc', CAST('-1', 'Int16')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -564,9 +560,8 @@ toInt32(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -595,7 +590,7 @@ SELECT toInt32(-32), toInt32(-32.32), toInt32('-32') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -656,7 +651,7 @@ Query: SELECT toInt32OrZero('-32'), toInt32OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -715,7 +710,7 @@ Query: SELECT toInt32OrNull('-32'), toInt32OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -750,9 +745,8 @@ toInt32OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int32` is unsuccessful. [Int32](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -780,7 +774,7 @@ Query: SELECT toInt32OrDefault('-32', CAST('-1', 'Int32')), toInt32OrDefault('abc', CAST('-1', 'Int32')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -813,9 +807,8 @@ toInt64(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported types: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -844,7 +837,7 @@ SELECT toInt64(-64), toInt64(-64.64), toInt64('-64') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -905,7 +898,7 @@ Query: SELECT toInt64OrZero('-64'), toInt64OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -965,7 +958,7 @@ Query: SELECT toInt64OrNull('-64'), toInt64OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1000,9 +993,8 @@ toInt64OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int64` is unsuccessful. [Int64](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1030,7 +1022,7 @@ Query: SELECT toInt64OrDefault('-64', CAST('-1', 'Int64')), toInt64OrDefault('abc', CAST('-1', 'Int64')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1063,9 +1055,8 @@ toInt128(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. 
+- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1093,7 +1084,7 @@ SELECT toInt128(-128), toInt128(-128.8), toInt128('-128') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1154,7 +1145,7 @@ Query: SELECT toInt128OrZero('-128'), toInt128OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1214,7 +1205,7 @@ Query: SELECT toInt128OrNull('-128'), toInt128OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1279,7 +1270,7 @@ Query: SELECT toInt128OrDefault('-128', CAST('-1', 'Int128')), toInt128OrDefault('abc', CAST('-1', 'Int128')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1312,9 +1303,8 @@ toInt256(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1342,7 +1332,7 @@ SELECT toInt256(-256), toInt256(-256.256), toInt256('-256') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1403,7 +1393,7 @@ Query: SELECT toInt256OrZero('-256'), toInt256OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1463,7 +1453,7 @@ Query: SELECT toInt256OrNull('-256'), toInt256OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1498,9 +1488,8 @@ toInt256OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `Int256` is unsuccessful. [Int256](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf` @@ -1528,7 +1517,7 @@ Query: SELECT toInt256OrDefault('-256', CAST('-1', 'Int256')), toInt256OrDefault('abc', CAST('-1', 'Int256')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1546,7 +1535,7 @@ toInt256OrDefault('abc', CAST('-1', 'Int256')): -1 - [`toInt256OrZero`](#toint256orzero). - [`toInt256OrNull`](#toint256ornull). -# toUInt8 +## toUInt8 Converts an input value to a value of type [`UInt8`](../data-types/int-uint.md). Throws an exception in case of an error. @@ -1561,9 +1550,8 @@ toUInt8(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. 
@@ -1592,7 +1580,7 @@ SELECT toUInt8(8), toUInt8(8.8), toUInt8('8') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1653,7 +1641,7 @@ Query: SELECT toUInt8OrZero('-8'), toUInt8OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1713,7 +1701,7 @@ Query: SELECT toUInt8OrNull('8'), toUInt8OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1748,9 +1736,8 @@ toUInt8OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt8` is unsuccessful. [UInt8](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1778,7 +1765,7 @@ Query: SELECT toUInt8OrDefault('8', CAST('0', 'UInt8')), toUInt8OrDefault('abc', CAST('0', 'UInt8')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1811,9 +1798,8 @@ toUInt16(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -1842,7 +1828,7 @@ SELECT toUInt16(16), toUInt16(16.16), toUInt16('16') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1903,7 +1889,7 @@ Query: SELECT toUInt16OrZero('16'), toUInt16OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1963,7 +1949,7 @@ Query: SELECT toUInt16OrNull('16'), toUInt16OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -1998,9 +1984,8 @@ toUInt16OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt16` is unsuccessful. [UInt16](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2028,7 +2013,7 @@ Query: SELECT toUInt16OrDefault('16', CAST('0', 'UInt16')), toUInt16OrDefault('abc', CAST('0', 'UInt16')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2061,9 +2046,8 @@ toUInt32(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. 
@@ -2092,7 +2076,7 @@ SELECT toUInt32(32), toUInt32(32.32), toUInt32('32') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2154,7 +2138,7 @@ Query: SELECT toUInt32OrZero('32'), toUInt32OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2214,7 +2198,7 @@ Query: SELECT toUInt32OrNull('32'), toUInt32OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2249,9 +2233,8 @@ toUInt32OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt32` is unsuccessful. [UInt32](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2279,7 +2262,7 @@ Query: SELECT toUInt32OrDefault('32', CAST('0', 'UInt32')), toUInt32OrDefault('abc', CAST('0', 'UInt32')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2312,9 +2295,8 @@ toUInt64(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported types: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2343,7 +2325,7 @@ SELECT toUInt64(64), toUInt64(64.64), toUInt64('64') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2362,7 +2344,7 @@ toUInt64('64'): 64 - [`toUInt64OrNull`](#touint64ornull). - [`toUInt64OrDefault`](#touint64ordefault). -## toInt64OrZero +## toUInt64OrZero Like [`toUInt64`](#touint64), this function converts an input value to a value of type [UInt64](../data-types/int-uint.md) but returns `0` in case of an error. @@ -2404,7 +2386,7 @@ Query: SELECT toUInt64OrZero('64'), toUInt64OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2464,7 +2446,7 @@ Query: SELECT toUInt64OrNull('64'), toUInt64OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2499,9 +2481,8 @@ toUInt64OrDefault(expr[, default]) - `defauult` (optional) — The default value to return if parsing to type `UInt64` is unsuccessful. [UInt64](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2529,7 +2510,7 @@ Query: SELECT toUInt64OrDefault('64', CAST('0', 'UInt64')), toUInt64OrDefault('abc', CAST('0', 'UInt64')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2562,9 +2543,8 @@ toUInt128(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. 
@@ -2592,7 +2572,7 @@ SELECT toUInt128(128), toUInt128(128.8), toUInt128('128') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2653,7 +2633,7 @@ Query: SELECT toUInt128OrZero('128'), toUInt128OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2713,7 +2693,7 @@ Query: SELECT toUInt128OrNull('128'), toUInt128OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2778,7 +2758,7 @@ Query: SELECT toUInt128OrDefault('128', CAST('0', 'UInt128')), toUInt128OrDefault('abc', CAST('0', 'UInt128')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2811,9 +2791,8 @@ toUInt256(expr) - `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Unsupported arguments: - String representations of Float32/64 values, including `NaN` and `Inf`. @@ -2841,7 +2820,7 @@ SELECT toUInt256(256), toUInt256(256.256), toUInt256('256') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2902,7 +2881,7 @@ Query: SELECT toUInt256OrZero('256'), toUInt256OrZero('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2962,7 +2941,7 @@ Query: SELECT toUInt256OrNull('256'), toUInt256OrNull('abc') -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -2997,9 +2976,8 @@ toUInt256OrDefault(expr[, default]) - `default` (optional) — The default value to return if parsing to type `UInt256` is unsuccessful. [UInt256](../data-types/int-uint.md). Supported arguments: -- Values of type (U)Int8/16/32/64/128/256. +- Values or string representations of type (U)Int8/16/32/64/128/256. - Values of type Float32/64. -- String representations of (U)Int8/16/32/128/256. Arguments for which the default value is returned: - String representations of Float32/64 values, including `NaN` and `Inf` @@ -3027,7 +3005,7 @@ Query: SELECT toUInt256OrDefault('-256', CAST('0', 'UInt256')), toUInt256OrDefault('abc', CAST('0', 'UInt256')) -FORMAT vertical; +FORMAT Vertical; ``` Result: @@ -3954,175 +3932,1022 @@ SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeN ## toDateTime64OrDefault -## toDecimal(32\|64\|128\|256) +## toDecimal32 -Converts `value` to the [Decimal](../data-types/decimal.md) data type with precision of `S`. The `value` can be a number or a string. The `S` (scale) parameter specifies the number of decimal places. +Converts an input value to a value of type [`Decimal(9, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error. -- `toDecimal32(value, S)` -- `toDecimal64(value, S)` -- `toDecimal128(value, S)` -- `toDecimal256(value, S)` +**Syntax** -## toDecimal(32\|64\|128\|256)OrNull - -Converts an input string to a [Nullable(Decimal(P,S))](../data-types/decimal.md) data type value. This family of functions includes: - -- `toDecimal32OrNull(expr, S)` — Results in `Nullable(Decimal32(S))` data type. -- `toDecimal64OrNull(expr, S)` — Results in `Nullable(Decimal64(S))` data type. -- `toDecimal128OrNull(expr, S)` — Results in `Nullable(Decimal128(S))` data type. -- `toDecimal256OrNull(expr, S)` — Results in `Nullable(Decimal256(S))` data type. - -These functions should be used instead of `toDecimal*()` functions, if you prefer to get a `NULL` value instead of an exception in the event of an input value parsing error. 
+```sql +toDecimal32(expr, S) +``` **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions), returns a value in the [String](../data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. +- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: **Returned value** -A value in the `Nullable(Decimal(P,S))` data type. The value contains: +- Value of type `Decimal(9, S)`. [Decimal32(S)](../data-types/int-uint.md). -- Number with `S` decimal places, if ClickHouse interprets the input string as a number. -- `NULL`, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places. - -**Examples** +**Example** Query: -``` sql -SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val); +```sql +SELECT + toDecimal32(2, 1) AS a, toTypeName(a) AS type_a, + toDecimal32(4.2, 2) AS b, toTypeName(b) AS type_b, + toDecimal32('4.2', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; ``` Result: ```response -┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐ -│ -1.111 │ Nullable(Decimal(9, 5)) │ -└────────┴────────────────────────────────────────────────────┘ +Row 1: +────── +a: 2 +type_a: Decimal(9, 1) +b: 4.2 +type_b: Decimal(9, 2) +c: 4.2 +type_c: Decimal(9, 3) ``` -Query: +**See also** -``` sql -SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val); +- [`toDecimal32OrZero`](#todecimal32orzero). +- [`toDecimal32OrNull`](#todecimal32ornull). +- [`toDecimal32OrDefault`](#todecimal32ordefault). + +## toDecimal32OrZero + +Like [`toDecimal32`](#todecimal32), this function converts an input value to a value of type [Decimal(9, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal32OrZero(expr, S) ``` -Result: - -```response -┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐ -│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │ -└──────┴────────────────────────────────────────────────────┘ -``` - - -## toDecimal(32\|64\|128\|256)OrDefault - -Converts an input string to a [Decimal(P,S)](../data-types/decimal.md) data type value. This family of functions includes: - -- `toDecimal32OrDefault(expr, S)` — Results in `Decimal32(S)` data type. -- `toDecimal64OrDefault(expr, S)` — Results in `Decimal64(S)` data type. -- `toDecimal128OrDefault(expr, S)` — Results in `Decimal128(S)` data type. -- `toDecimal256OrDefault(expr, S)` — Results in `Decimal256(S)` data type. 
- -These functions should be used instead of `toDecimal*()` functions, if you prefer to get a default value instead of an exception in the event of an input value parsing error. - **Arguments** -- `expr` — [Expression](../syntax.md/#syntax-expressions), returns a value in the [String](../data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: **Returned value** -A value in the `Decimal(P,S)` data type. The value contains: - -- Number with `S` decimal places, if ClickHouse interprets the input string as a number. -- Default `Decimal(P,S)` data type value, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places. - -**Examples** - -Query: - -``` sql -SELECT toDecimal32OrDefault(toString(-1.111), 5) AS val, toTypeName(val); -``` - -Result: - -```response -┌────val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 5))─┐ -│ -1.111 │ Decimal(9, 5) │ -└────────┴───────────────────────────────────────────────────────┘ -``` - -Query: - -``` sql -SELECT toDecimal32OrDefault(toString(-1.111), 2) AS val, toTypeName(val); -``` - -Result: - -```response -┌─val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 2))─┐ -│ 0 │ Decimal(9, 2) │ -└─────┴───────────────────────────────────────────────────────┘ -``` - -## toDecimal(32\|64\|128\|256)OrZero - -Converts an input value to the [Decimal(P,S)](../data-types/decimal.md) data type. This family of functions includes: - -- `toDecimal32OrZero( expr, S)` — Results in `Decimal32(S)` data type. -- `toDecimal64OrZero( expr, S)` — Results in `Decimal64(S)` data type. -- `toDecimal128OrZero( expr, S)` — Results in `Decimal128(S)` data type. -- `toDecimal256OrZero( expr, S)` — Results in `Decimal256(S)` data type. - -These functions should be used instead of `toDecimal*()` functions, if you prefer to get a `0` value instead of an exception in the event of an input value parsing error. - -**Arguments** - -- `expr` — [Expression](../syntax.md/#syntax-expressions), returns a value in the [String](../data-types/string.md) data type. ClickHouse expects the textual representation of the decimal number. For example, `'1.111'`. -- `S` — Scale, the number of decimal places in the resulting value. - -**Returned value** - -A value in the `Nullable(Decimal(P,S))` data type. The value contains: - -- Number with `S` decimal places, if ClickHouse interprets the input string as a number. -- 0 with `S` decimal places, if ClickHouse can’t interpret the input string as a number or if the input number contains more than `S` decimal places. 
+- Value of type `Decimal(9, S)` if successful, otherwise `0` with `S` decimal places. [Decimal32(S)](../data-types/decimal.md). **Example** Query: ``` sql -SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val); +SELECT + toDecimal32OrZero(toString(-1.111), 5) AS a, + toTypeName(a), + toDecimal32OrZero(toString('Inf'), 5) as b, + toTypeName(b) +FORMAT Vertical; ``` Result: ```response -┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐ -│ -1.111 │ Decimal(9, 5) │ -└────────┴────────────────────────────────────────────────────┘ +Row 1: +────── +a: -1.111 +toTypeName(a): Decimal(9, 5) +b: 0 +toTypeName(b): Decimal(9, 5) ``` +**See also** + +- [`toDecimal32`](#todecimal32). +- [`toDecimal32OrNull`](#todecimal32ornull). +- [`toDecimal32OrDefault`](#todecimal32ordefault). + +## toDecimal32OrNull + +Like [`toDecimal32`](#todecimal32), this function converts an input value to a value of type [Nullable(Decimal(9, S))](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal32OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrNull('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(9, S))` if successful, otherwise value `NULL` of the same type. [Decimal32(S)](../data-types/decimal.md). + +**Examples** + Query: ``` sql -SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val); +SELECT + toDecimal32OrNull(toString(-1.111), 5) AS a, + toTypeName(a), + toDecimal32OrNull(toString('Inf'), 5) as b, + toTypeName(b) +FORMAT Vertical; ``` Result: ```response -┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐ -│ 0.00 │ Decimal(9, 2) │ -└──────┴────────────────────────────────────────────────────┘ +Row 1: +────── +a: -1.111 +toTypeName(a): Nullable(Decimal(9, 5)) +b: ᴺᵁᴸᴸ +toTypeName(b): Nullable(Decimal(9, 5)) ``` +**See also** + +- [`toDecimal32`](#todecimal32). +- [`toDecimal32OrZero`](#todecimal32orzero). +- [`toDecimal32OrDefault`](#todecimal32ordefault). + +## toDecimal32OrDefault + +Like [`toDecimal32`](#todecimal32), this function converts an input value to a value of type [Decimal(9, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal32OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 9, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal32(S)` is unsuccessful. [Decimal32(S)](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. 
+- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal32OrDefault('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal32`: `( -1 * 10^(9 - S), 1 * 10^(9 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(9, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal32(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal32OrDefault(toString(0.0001), 5) AS a, + toTypeName(a), + toDecimal32OrDefault('Inf', 0, CAST('-1', 'Decimal32(0)')) AS b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(9, 5) +b: -1 +toTypeName(b): Decimal(9, 0) +``` + +**See also** + +- [`toDecimal32`](#todecimal32). +- [`toDecimal32OrZero`](#todecimal32orzero). +- [`toDecimal32OrNull`](#todecimal32ornull). + +## toDecimal64 + +Converts an input value to a value of type [`Decimal(18, S)`](../data-types/decimal.md) with scale of `S`. Throws an exception in case of an error. + +**Syntax** + +```sql +toDecimal64(expr, S) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. +- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(18, S)`. [Decimal64(S)](../data-types/int-uint.md). + +**Example** + +Query: + +```sql +SELECT + toDecimal64(2, 1) AS a, toTypeName(a) AS type_a, + toDecimal64(4.2, 2) AS b, toTypeName(b) AS type_b, + toDecimal64('4.2', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 2 +type_a: Decimal(18, 1) +b: 4.2 +type_b: Decimal(18, 2) +c: 4.2 +type_c: Decimal(18, 3) +``` + +**See also** + +- [`toDecimal64OrZero`](#todecimal64orzero). +- [`toDecimal64OrNull`](#todecimal64ornull). +- [`toDecimal64OrDefault`](#todecimal64ordefault). + +## toDecimal64OrZero + +Like [`toDecimal64`](#todecimal64), this function converts an input value to a value of type [Decimal(18, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal64OrZero(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. 
+- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(18, S)` if successful, otherwise `0` with `S` decimal places. [Decimal64(S)](../data-types/decimal.md). + +**Example** + +Query: + +``` sql +SELECT + toDecimal64OrZero(toString(0.0001), 18) AS a, + toTypeName(a), + toDecimal64OrZero(toString('Inf'), 18) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(18, 18) +b: 0 +toTypeName(b): Decimal(18, 18) +``` + +**See also** + +- [`toDecimal64`](#todecimal64). +- [`toDecimal64OrNull`](#todecimal64ornull). +- [`toDecimal64OrDefault`](#todecimal64ordefault). + +## toDecimal64OrNull + +Like [`toDecimal64`](#todecimal64), this function converts an input value to a value of type [Nullable(Decimal(18, S))](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal64OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64OrNull('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(18, S))` if successful, otherwise value `NULL` of the same type. [Decimal64(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal64OrNull(toString(0.0001), 18) AS a, + toTypeName(a), + toDecimal64OrNull(toString('Inf'), 18) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Nullable(Decimal(18, 18)) +b: ᴺᵁᴸᴸ +toTypeName(b): Nullable(Decimal(18, 18)) +``` + +**See also** + +- [`toDecimal64`](#todecimal64). +- [`toDecimal64OrZero`](#todecimal64orzero). +- [`toDecimal64OrDefault`](#todecimal64ordefault). + +## toDecimal64OrDefault + +Like [`toDecimal64`](#todecimal64), this function converts an input value to a value of type [Decimal(18, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal64OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 18, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal64(S)` is unsuccessful. 
[Decimal64(S)](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal64OrDefault('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal64`: `( -1 * 10^(18 - S), 1 * 10^(18 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(18, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal64(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal64OrDefault(toString(0.0001), 18) AS a, + toTypeName(a), + toDecimal64OrDefault('Inf', 0, CAST('-1', 'Decimal64(0)')) AS b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(18, 18) +b: -1 +toTypeName(b): Decimal(18, 0) +``` + +**See also** + +- [`toDecimal64`](#todecimal64). +- [`toDecimal64OrZero`](#todecimal64orzero). +- [`toDecimal64OrNull`](#todecimal64ornull). + +## toDecimal128 + +Converts an input value to a value of type [`Decimal(38, S)`](../data-types/decimal.md) with a scale of `S`. Throws an exception in case of an error. + +**Syntax** + +```sql +toDecimal128(expr, S) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. +- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(38, S)`. [Decimal128(S)](../data-types/decimal.md). + +**Example** + +Query: + +```sql +SELECT + toDecimal128(99, 1) AS a, toTypeName(a) AS type_a, + toDecimal128(99.67, 2) AS b, toTypeName(b) AS type_b, + toDecimal128('99.67', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 99 +type_a: Decimal(38, 1) +b: 99.67 +type_b: Decimal(38, 2) +c: 99.67 +type_c: Decimal(38, 3) +``` + +**See also** + +- [`toDecimal128OrZero`](#todecimal128orzero). +- [`toDecimal128OrNull`](#todecimal128ornull). +- [`toDecimal128OrDefault`](#todecimal128ordefault). + +## toDecimal128OrZero + +Like [`toDecimal128`](#todecimal128), this function converts an input value to a value of type [Decimal(38, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal128OrZero(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md).
+- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(38, S)` if successful, otherwise `0` with `S` decimal places. [Decimal128(S)](../data-types/decimal.md). + +**Example** + +Query: + +``` sql +SELECT + toDecimal128OrZero(toString(0.0001), 38) AS a, + toTypeName(a), + toDecimal128OrZero(toString('Inf'), 38) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(38, 38) +b: 0 +toTypeName(b): Decimal(38, 38) +``` + +**See also** + +- [`toDecimal128`](#todecimal128). +- [`toDecimal128OrNull`](#todecimal128ornull). +- [`toDecimal128OrDefault`](#todecimal128ordefault). + +## toDecimal128OrNull + +Like [`toDecimal128`](#todecimal128), this function converts an input value to a value of type [Nullable(Decimal(38, S))](../data-types/decimal.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toDecimal128OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128OrNull('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(38, S))` if successful, otherwise value `NULL` of the same type. [Decimal128(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal128OrNull(toString(1/42), 38) AS a, + toTypeName(a), + toDecimal128OrNull(toString('Inf'), 38) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Nullable(Decimal(38, 38)) +b: ᴺᵁᴸᴸ +toTypeName(b): Nullable(Decimal(38, 38)) +``` + +**See also** + +- [`toDecimal128`](#todecimal128). +- [`toDecimal128OrZero`](#todecimal128orzero). +- [`toDecimal128OrDefault`](#todecimal128ordefault). + +## toDecimal128OrDefault + +Like [`toDecimal128`](#todecimal128), this function converts an input value to a value of type [Decimal(38, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal128OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number.
[String](../data-types/string.md). +- `S` — Scale parameter between 0 and 38, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal128(S)` is unsuccessful. [Decimal128(S)](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal128OrDefault('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal128`: `( -1 * 10^(38 - S), 1 * 10^(38 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(38, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal128(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal128OrDefault(toString(1/42), 18) AS a, + toTypeName(a), + toDecimal128OrDefault('Inf', 0, CAST('-1', 'Decimal128(0)')) AS b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Decimal(38, 18) +b: -1 +toTypeName(b): Decimal(38, 0) +``` + +**See also** + +- [`toDecimal128`](#todecimal128). +- [`toDecimal128OrZero`](#todecimal128orzero). +- [`toDecimal128OrNull`](#todecimal128ornull). + +## toDecimal256 + +Converts an input value to a value of type [`Decimal(76, S)`](../data-types/decimal.md) with a scale of `S`. Throws an exception in case of an error. + +**Syntax** + +```sql +toDecimal256(expr, S) +``` + +**Arguments** + +- `expr` — Expression returning a number or a string representation of a number. [Expression](../syntax.md/#syntax-expressions). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- Values or string representations of type (U)Int8/16/32/64/128/256. +- Values or string representations of type Float32/64. + +Unsupported arguments: +- Values or string representations of Float32/64 values `NaN` and `Inf` (case-insensitive). +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an exception. +::: + +**Returned value** + +- Value of type `Decimal(76, S)`. [Decimal256(S)](../data-types/decimal.md). + +**Example** + +Query: + +```sql +SELECT + toDecimal256(99, 1) AS a, toTypeName(a) AS type_a, + toDecimal256(99.67, 2) AS b, toTypeName(b) AS type_b, + toDecimal256('99.67', 3) AS c, toTypeName(c) AS type_c +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 99 +type_a: Decimal(76, 1) +b: 99.67 +type_b: Decimal(76, 2) +c: 99.67 +type_c: Decimal(76, 3) +``` + +**See also** + +- [`toDecimal256OrZero`](#todecimal256orzero). +- [`toDecimal256OrNull`](#todecimal256ornull). +- [`toDecimal256OrDefault`](#todecimal256ordefault).
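+
+The note above states that excess fractional digits are discarded rather than rounded. The following sketch (an illustrative query added here for clarity, not one of the reference examples, with made-up literal values) shows what that rule implies for a literal that has more fractional digits than the requested scale:
+
+```sql
+-- Scale is 2, so the third fractional digit is simply dropped (not rounded up).
+SELECT toDecimal256('1.999', 2) AS truncated, toTypeName(truncated) AS type;
+-- Per the truncation rule above, this should yield 1.99 of type Decimal(76, 2), not 2.00.
+```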
+ +## toDecimal256OrZero + +Like [`toDecimal256`](#todecimal256), this function converts an input value to a value of type [Decimal(76, S)](../data-types/decimal.md) but returns `0` in case of an error. + +**Syntax** + +```sql +toDecimal256OrZero(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256OrZero('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(76, S)` if successful, otherwise `0` with `S` decimal places. [Decimal256(S)](../data-types/decimal.md). + +**Example** + +Query: + +``` sql +SELECT + toDecimal256OrZero(toString(0.0001), 76) AS a, + toTypeName(a), + toDecimal256OrZero(toString('Inf'), 76) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.0001 +toTypeName(a): Decimal(76, 76) +b: 0 +toTypeName(b): Decimal(76, 76) +``` + +**See also** + +- [`toDecimal256`](#todecimal256). +- [`toDecimal256OrNull`](#todecimal256ornull). +- [`toDecimal256OrDefault`](#todecimal256ordefault). + +## toDecimal256OrNull + +Like [`toDecimal256`](#todecimal256), this function converts an input value to a value of type [Nullable(Decimal(76, S))](../data-types/decimal.md) but returns `NULL` in case of an error. + +**Syntax** + +```sql +toDecimal256OrNull(expr, S) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256OrNull('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Nullable(Decimal(76, S))` if successful, otherwise value `NULL` of the same type. [Decimal256(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal256OrNull(toString(1/42), 76) AS a, + toTypeName(a), + toDecimal256OrNull(toString('Inf'), 76) as b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Nullable(Decimal(76, 76)) +b: ᴺᵁᴸᴸ +toTypeName(b): Nullable(Decimal(76, 76)) +``` + +**See also** + +- [`toDecimal256`](#todecimal256). +- [`toDecimal256OrZero`](#todecimal256orzero). +- [`toDecimal256OrDefault`](#todecimal256ordefault).
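+
+To make the difference between the error-handling variants concrete before introducing the `OrDefault` form, the sketch below (an illustrative query based on the behaviour described above, not a reference example; the input string is made up) feeds the same unparsable string to both variants:
+
+```sql
+SELECT
+    toDecimal256OrZero('not-a-number', 2) AS or_zero,  -- described above as returning 0 with S decimal places
+    toDecimal256OrNull('not-a-number', 2) AS or_null;  -- described above as returning NULL
+```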
+ +## toDecimal256OrDefault + +Like [`toDecimal256`](#todecimal256), this function converts an input value to a value of type [Decimal(76, S)](../data-types/decimal.md) but returns the default value in case of an error. + +**Syntax** + +```sql +toDecimal256OrDefault(expr, S[, default]) +``` + +**Arguments** + +- `expr` — A String representation of a number. [String](../data-types/string.md). +- `S` — Scale parameter between 0 and 76, specifying how many digits the fractional part of a number can have. [UInt8](../data-types/int-uint.md). +- `default` (optional) — The default value to return if parsing to type `Decimal256(S)` is unsuccessful. [Decimal256(S)](../data-types/decimal.md). + +Supported arguments: +- String representations of type (U)Int8/16/32/64/128/256. +- String representations of type Float32/64. + +Unsupported arguments: +- String representations of Float32/64 values `NaN` and `Inf`. +- String representations of binary and hexadecimal values, e.g. `SELECT toDecimal256OrDefault('0xc0fe', 1);`. + +:::note +An overflow can occur if the value of `expr` exceeds the bounds of `Decimal256`: `( -1 * 10^(76 - S), 1 * 10^(76 - S) )`. +Excessive digits in a fraction are discarded (not rounded). +Excessive digits in the integer part will lead to an error. +::: + +**Returned value** + +- Value of type `Decimal(76, S)` if successful, otherwise returns the default value if passed or `0` if not. [Decimal256(S)](../data-types/decimal.md). + +**Examples** + +Query: + +``` sql +SELECT + toDecimal256OrDefault(toString(1/42), 76) AS a, + toTypeName(a), + toDecimal256OrDefault('Inf', 0, CAST('-1', 'Decimal256(0)')) AS b, + toTypeName(b) +FORMAT Vertical; +``` + +Result: + +```response +Row 1: +────── +a: 0.023809523809523808 +toTypeName(a): Decimal(76, 76) +b: -1 +toTypeName(b): Decimal(76, 0) +``` + +**See also** + +- [`toDecimal256`](#todecimal256). +- [`toDecimal256OrZero`](#todecimal256orzero). +- [`toDecimal256OrNull`](#todecimal256ornull). + ## toString Functions for converting between numbers, strings (but not fixed strings), dates, and dates with times. diff --git a/docs/en/sql-reference/statements/alter/statistics.md b/docs/en/sql-reference/statements/alter/statistics.md index 6880cef0e5c..7a1774a01b5 100644 --- a/docs/en/sql-reference/statements/alter/statistics.md +++ b/docs/en/sql-reference/statements/alter/statistics.md @@ -8,26 +8,28 @@ sidebar_label: STATISTICS The following operations are available: -- `ALTER TABLE [db].table ADD STATISTICS (columns list) TYPE (type list)` - Adds statistic description to tables metadata. +- `ALTER TABLE [db].table ADD STATISTICS [IF NOT EXISTS] (column list) TYPE (type list)` - Adds statistic description to tables metadata. -- `ALTER TABLE [db].table MODIFY STATISTICS (columns list) TYPE (type list)` - Modifies statistic description to tables metadata. +- `ALTER TABLE [db].table MODIFY STATISTICS (column list) TYPE (type list)` - Modifies statistic description to tables metadata. -- `ALTER TABLE [db].table DROP STATISTICS (columns list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns. +- `ALTER TABLE [db].table DROP STATISTICS [IF EXISTS] (column list)` - Removes statistics from the metadata of the specified columns and deletes all statistics objects in all parts for the specified columns. -- `ALTER TABLE [db].table CLEAR STATISTICS (columns list)` - Deletes all statistics objects in all parts for the specified columns. 
Statistics objects can be rebuild using `ALTER TABLE MATERIALIZE STATISTICS`. +- `ALTER TABLE [db].table CLEAR STATISTICS [IF EXISTS] (column list)` - Deletes all statistics objects in all parts for the specified columns. Statistics objects can be rebuilt using `ALTER TABLE MATERIALIZE STATISTICS`. -- `ALTER TABLE [db.]table MATERIALIZE STATISTICS (columns list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). +- `ALTER TABLE [db.]table MATERIALIZE STATISTICS [IF EXISTS] (column list)` - Rebuilds the statistic for columns. Implemented as a [mutation](../../../sql-reference/statements/alter/index.md#mutations). The first two commands are lightweight in a sense that they only change metadata or remove files. Also, they are replicated, syncing statistics metadata via ZooKeeper. -There is an example adding two statistics types to two columns: +## Example + +Adding two statistics types to two columns: ``` ALTER TABLE t1 MODIFY STATISTICS c, d TYPE TDigest, Uniq; ``` :::note -Statistic manipulation is supported only for tables with [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). +Statistics are supported only for [`*MergeTree`](../../../engines/table-engines/mergetree-family/mergetree.md) engine tables (including [replicated](../../../engines/table-engines/mergetree-family/replication.md) variants). ::: diff --git a/docs/en/sql-reference/statements/select/join.md b/docs/en/sql-reference/statements/select/join.md index 96d9d26977d..b228f7025c4 100644 --- a/docs/en/sql-reference/statements/select/join.md +++ b/docs/en/sql-reference/statements/select/join.md @@ -186,7 +186,7 @@ Otherwise, you'll get `INVALID_JOIN_ON_EXPRESSION`. ::: -Clickhouse currently supports `ALL INNER/LEFT/RIGHT/FULL JOIN` with inequality conditions in addition to equality conditions. The inequality conditions are supported only for `hash` and `grace_hash` join algorithms. The inequality conditions are not supported with `join_use_nulls`. +ClickHouse currently supports `ALL/ANY/SEMI/ANTI INNER/LEFT/RIGHT/FULL JOIN` with inequality conditions in addition to equality conditions. The inequality conditions are supported only for `hash` and `grace_hash` join algorithms. The inequality conditions are not supported with `join_use_nulls`. **Example** diff --git a/docs/en/sql-reference/table-functions/azureBlobStorage.md b/docs/en/sql-reference/table-functions/azureBlobStorage.md index f59fedeb3a2..6936c807f96 100644 --- a/docs/en/sql-reference/table-functions/azureBlobStorage.md +++ b/docs/en/sql-reference/table-functions/azureBlobStorage.md @@ -77,3 +77,16 @@ SELECT count(*) FROM azureBlobStorage('DefaultEndpointsProtocol=https;AccountNam **See Also** - [AzureBlobStorage Table Engine](/docs/en/engines/table-engines/integrations/azureBlobStorage.md) + +## Hive-style partitioning {#hive-style-partitioning} + +When the setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow partition columns to be used as virtual columns in the query. These virtual columns have the same names as in the partitioned path, but start with `_`.
+ +**Example** + +Use a virtual column created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from azureBlobStorage(config, storage_account_url='...', container='...', blob_path='http://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; +``` diff --git a/docs/en/sql-reference/table-functions/file.md b/docs/en/sql-reference/table-functions/file.md index 44b1b50620a..3243e6cf569 100644 --- a/docs/en/sql-reference/table-functions/file.md +++ b/docs/en/sql-reference/table-functions/file.md @@ -103,7 +103,7 @@ LIMIT 2; └─────────┴─────────┴─────────┘ ``` -### Inserting data from a file into a table: +### Inserting data from a file into a table ``` sql INSERT INTO FUNCTION @@ -206,6 +206,19 @@ SELECT count(*) FROM file('big_dir/**/file002', 'CSV', 'name String, value UInt3 - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When the setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow partition columns to be used as virtual columns in the query. These virtual columns have the same names as in the partitioned path, but start with `_`. + +**Example** + +Use a virtual column created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from file('data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; +``` + +## Settings {#settings} - [engine_file_empty_if_not_exists](/docs/en/operations/settings/settings.md#engine-file-empty_if-not-exists) - allows to select empty data from a file that doesn't exist. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/hdfs.md b/docs/en/sql-reference/table-functions/hdfs.md index 405ac477846..f96e48d914d 100644 --- a/docs/en/sql-reference/table-functions/hdfs.md +++ b/docs/en/sql-reference/table-functions/hdfs.md @@ -100,6 +100,19 @@ FROM hdfs('hdfs://hdfs1:9000/big_dir/file{0..9}{0..9}{0..9}', 'CSV', 'name Strin - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When the setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow partition columns to be used as virtual columns in the query. These virtual columns have the same names as in the partitioned path, but start with `_`. + +**Example** + +Use a virtual column created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from HDFS('hdfs://hdfs1:9000/data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; +``` + ## Storage Settings {#storage-settings} - [hdfs_truncate_on_insert](/docs/en/operations/settings/settings.md#hdfs_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default.
diff --git a/docs/en/sql-reference/table-functions/s3.md b/docs/en/sql-reference/table-functions/s3.md index 35e5d86034c..1bd9f38517e 100644 --- a/docs/en/sql-reference/table-functions/s3.md +++ b/docs/en/sql-reference/table-functions/s3.md @@ -274,6 +274,19 @@ FROM s3( - `_size` — Size of the file in bytes. Type: `Nullable(UInt64)`. If the file size is unknown, the value is `NULL`. In case of archive shows uncompressed file size of the file inside the archive. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. + +**Example** + +Use virtual column, created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from s3('s3://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; +``` + ## Storage Settings {#storage-settings} - [s3_truncate_on_insert](/docs/en/operations/settings/settings.md#s3_truncate_on_insert) - allows to truncate file before insert into it. Disabled by default. diff --git a/docs/en/sql-reference/table-functions/url.md b/docs/en/sql-reference/table-functions/url.md index 3bb7aff53a7..b4027594e7c 100644 --- a/docs/en/sql-reference/table-functions/url.md +++ b/docs/en/sql-reference/table-functions/url.md @@ -55,6 +55,19 @@ Character `|` inside patterns is used to specify failover addresses. They are it - `_size` — Size of the resource in bytes. Type: `Nullable(UInt64)`. If the size is unknown, the value is `NULL`. - `_time` — Last modified time of the file. Type: `Nullable(DateTime)`. If the time is unknown, the value is `NULL`. +## Hive-style partitioning {#hive-style-partitioning} + +When setting `use_hive_partitioning` is set to 1, ClickHouse will detect Hive-style partitioning in the path (`/name=value/`) and will allow to use partition columns as virtual columns in the query. These virtual columns will have the same names as in the partitioned path, but starting with `_`. + +**Example** + +Use virtual column, created with Hive-style partitioning + +``` sql +SET use_hive_partitioning = 1; +SELECT * from url('http://data/path/date=*/country=*/code=*/*.parquet') where _date > '2020-01-01' and _country = 'Netherlands' and _code = 42; +``` + ## Storage Settings {#storage-settings} - [engine_url_skip_empty_files](/docs/en/operations/settings/settings.md#engine_url_skip_empty_files) - allows to skip empty files while reading. Disabled by default. diff --git a/docs/zh/operations/external-authenticators/kerberos.md b/docs/zh/operations/external-authenticators/kerberos.md index 649a0b9bd48..d1a39bbc952 100644 --- a/docs/zh/operations/external-authenticators/kerberos.md +++ b/docs/zh/operations/external-authenticators/kerberos.md @@ -23,30 +23,30 @@ slug: /zh/operations/external-authenticators/kerberos 示例 (进入 `config.xml`): ```xml - + - + ``` 主体规范: ```xml - + HTTP/clickhouse.example.com@EXAMPLE.COM - + ``` 按领域过滤: ```xml - + EXAMPLE.COM - + ``` !!! warning "注意" @@ -74,7 +74,7 @@ Kerberos主体名称格式通常遵循以下模式: 示例 (进入 `users.xml`): ``` - + @@ -85,7 +85,7 @@ Kerberos主体名称格式通常遵循以下模式: - + ``` !!! 
warning "警告" diff --git a/programs/CMakeLists.txt b/programs/CMakeLists.txt index ce3a4659e0e..3add371b30f 100644 --- a/programs/CMakeLists.txt +++ b/programs/CMakeLists.txt @@ -1,4 +1,4 @@ -add_compile_options($<$,$>:${COVERAGE_FLAGS}>) +add_compile_options("$<$,$>:${COVERAGE_FLAGS}>") if (USE_CLANG_TIDY) set (CMAKE_CXX_CLANG_TIDY "${CLANG_TIDY_PATH}") diff --git a/programs/benchmark/Benchmark.cpp b/programs/benchmark/Benchmark.cpp index 48dca82eb2b..36f774a3c12 100644 --- a/programs/benchmark/Benchmark.cpp +++ b/programs/benchmark/Benchmark.cpp @@ -75,6 +75,8 @@ public: const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, + const String & proto_recv_chunked_, const String & quota_key_, const String & stage, bool randomize_, @@ -128,7 +130,9 @@ public: connections.emplace_back(std::make_unique( concurrency, cur_host, cur_port, - default_database_, user_, password_, quota_key_, + default_database_, user_, password_, + proto_send_chunked_, proto_recv_chunked_, + quota_key_, /* cluster_= */ "", /* cluster_secret_= */ "", /* client_name_= */ std::string(DEFAULT_CLIENT_NAME), @@ -662,6 +666,50 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) Strings hosts = options.count("host") ? options["host"].as() : Strings({"localhost"}); + String proto_send_chunked {"notchunked"}; + String proto_recv_chunked {"notchunked"}; + + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + proto_send_chunked = cap_str; + proto_recv_chunked = cap_str; + } + else + { + if (direction == "send") + proto_send_chunked = cap_str; + else + proto_recv_chunked = cap_str; + } + } + } + + Benchmark benchmark( options["concurrency"].as(), options["delay"].as(), @@ -673,6 +721,8 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv) options["database"].as(), options["user"].as(), options["password"].as(), + proto_send_chunked, + proto_recv_chunked, options["quota_key"].as(), options["stage"].as(), options.count("randomize"), diff --git a/programs/client/Client.cpp b/programs/client/Client.cpp index 1d99d223ee9..25c94c56aa6 100644 --- a/programs/client/Client.cpp +++ b/programs/client/Client.cpp @@ -223,7 +223,7 @@ std::vector Client::loadWarningMessages() size_t rows = packet.block.rows(); for (size_t i = 0; i < rows; ++i) - messages.emplace_back(column[i].get()); + messages.emplace_back(column[i].safeGet()); } continue; diff --git a/programs/client/clickhouse-client.xml b/programs/client/clickhouse-client.xml index c32b63413e9..9ce7d1cb223 100644 --- a/programs/client/clickhouse-client.xml +++ b/programs/client/clickhouse-client.xml @@ -38,6 +38,21 @@ {display_name} \e[1;31m:)\e[0m + + + 9000 + + + diff --git a/programs/server/fuzzers/tcp_protocol_fuzzer.cpp b/programs/server/fuzzers/tcp_protocol_fuzzer.cpp index 950ea09669a..7cebdc2ad65 100644 --- 
a/programs/server/fuzzers/tcp_protocol_fuzzer.cpp +++ b/programs/server/fuzzers/tcp_protocol_fuzzer.cpp @@ -10,6 +10,7 @@ #include #include +#include #include @@ -25,6 +26,12 @@ static int64_t port = 9000; using namespace std::chrono_literals; +void on_exit() +{ + BaseDaemon::terminate(); + main_app.wait(); +} + extern "C" int LLVMFuzzerInitialize(int * argc, char ***argv) { @@ -60,6 +67,8 @@ int LLVMFuzzerInitialize(int * argc, char ***argv) exit(-1); } + atexit(on_exit); + return 0; } diff --git a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp index 7034e6373b1..5cc9f725b46 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArray.cpp @@ -780,12 +780,12 @@ AggregateFunctionPtr createAggregateFunctionGroupArray( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[0].get() < 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); has_limit = true; - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, @@ -816,11 +816,11 @@ AggregateFunctionPtr createAggregateFunctionGroupArraySample( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[i].get() < 0) || - (type == Field::Types::UInt64 && parameters[i].get() == 0)) + if ((type == Field::Types::Int64 && parameters[i].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[i].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - return parameters[i].get(); + return parameters[i].safeGet(); }; UInt64 max_elems = get_parameter(0); diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp index 38f2fcb9fb9..36d00b1d9ec 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayIntersect.cpp @@ -83,16 +83,16 @@ public: if (version == 1) { for (size_t i = 0; i < arr_size; ++i) - set.insert(static_cast((*data_column)[offset + i].get())); + set.insert(static_cast((*data_column)[offset + i].safeGet())); } else if (!set.empty()) { typename State::Set new_set; for (size_t i = 0; i < arr_size; ++i) { - typename State::Set::LookupResult set_value = set.find(static_cast((*data_column)[offset + i].get())); + typename State::Set::LookupResult set_value = set.find(static_cast((*data_column)[offset + i].safeGet())); if (set_value != nullptr) - new_set.insert(static_cast((*data_column)[offset + i].get())); + new_set.insert(static_cast((*data_column)[offset + i].safeGet())); } set = std::move(new_set); } diff --git a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp index 026b8d1956f..2c3ac7f883e 100644 --- 
a/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArrayMoving.cpp @@ -269,12 +269,12 @@ AggregateFunctionPtr createAggregateFunctionMoving( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive integer", name); - if ((type == Field::Types::Int64 && parameters[0].get() <= 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() <= 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive integer", name); limit_size = true; - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, diff --git a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp index d41d743e17a..27043ed6aa6 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupArraySorted.cpp @@ -397,11 +397,11 @@ AggregateFunctionPtr createAggregateFunctionGroupArray( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[0].get() < 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, diff --git a/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp b/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp index 5494ef74705..636ac80e350 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupConcat.cpp @@ -247,7 +247,7 @@ AggregateFunctionPtr createAggregateFunctionGroupConcat( if (type != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "First parameter for aggregate function {} should be string", name); - delimiter = parameters[0].get(); + delimiter = parameters[0].safeGet(); } if (parameters.size() == 2) { @@ -256,12 +256,12 @@ AggregateFunctionPtr createAggregateFunctionGroupConcat( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second parameter for aggregate function {} should be a positive number", name); - if ((type == Field::Types::Int64 && parameters[1].get() <= 0) || - (type == Field::Types::UInt64 && parameters[1].get() == 0)) - throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second parameter for aggregate function {} should be a positive number, got: {}", name, parameters[1].get()); + if ((type == Field::Types::Int64 && parameters[1].safeGet() <= 0) || + (type == Field::Types::UInt64 && parameters[1].safeGet() == 0)) + throw Exception(ErrorCodes::BAD_ARGUMENTS, "Second parameter for aggregate function {} should be a positive number, got: {}", name, parameters[1].safeGet()); has_limit = true; - limit = parameters[1].get(); + limit 
= parameters[1].safeGet(); } if (has_limit) diff --git a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp index 7b4300b3568..5cbf449c946 100644 --- a/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp +++ b/src/AggregateFunctions/AggregateFunctionGroupUniqArray.cpp @@ -323,12 +323,12 @@ AggregateFunctionPtr createAggregateFunctionGroupUniqArray( if (type != Field::Types::Int64 && type != Field::Types::UInt64) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); - if ((type == Field::Types::Int64 && parameters[0].get() < 0) || - (type == Field::Types::UInt64 && parameters[0].get() == 0)) + if ((type == Field::Types::Int64 && parameters[0].safeGet() < 0) || + (type == Field::Types::UInt64 && parameters[0].safeGet() == 0)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name); limit_size = true; - max_elems = parameters[0].get(); + max_elems = parameters[0].safeGet(); } else throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, diff --git a/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp b/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp index 04eebe9f485..28e8d37b8c8 100644 --- a/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp +++ b/src/AggregateFunctions/AggregateFunctionKolmogorovSmirnovTest.cpp @@ -238,7 +238,7 @@ public: if (params[0].getType() != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a String", getName()); - const auto & param = params[0].get(); + const auto & param = params[0].safeGet(); if (param == "two-sided") alternative = Alternative::TwoSided; else if (param == "less") @@ -255,7 +255,7 @@ public: if (params[1].getType() != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require second parameter to be a String", getName()); - method = params[1].get(); + method = params[1].safeGet(); if (method != "auto" && method != "exact" && method != "asymp" && method != "asymptotic") throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown method in aggregate function {}. 
" "It must be one of: 'auto', 'exact', 'asymp' (or 'asymptotic')", getName()); diff --git a/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp b/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp index 6d1e3c0f64b..813b13b6f7b 100644 --- a/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp +++ b/src/AggregateFunctions/AggregateFunctionLargestTriangleThreeBuckets.cpp @@ -181,7 +181,7 @@ public: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a UInt64", getName()); - total_buckets = params[0].get(); + total_buckets = params[0].safeGet(); this->x_type = WhichDataType(arguments[0]).idx; this->y_type = WhichDataType(arguments[1]).idx; diff --git a/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp b/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp index f088737c340..fa90846650d 100644 --- a/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp +++ b/src/AggregateFunctions/AggregateFunctionMannWhitney.cpp @@ -152,7 +152,7 @@ public: if (params[0].getType() != Field::Types::String) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require first parameter to be a String", getName()); - const auto & param = params[0].get(); + const auto & param = params[0].safeGet(); if (param == "two-sided") alternative = Alternative::TwoSided; else if (param == "less") @@ -169,7 +169,7 @@ public: if (params[1].getType() != Field::Types::UInt64) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} require second parameter to be a UInt64", getName()); - continuity_correction = static_cast(params[1].get()); + continuity_correction = static_cast(params[1].safeGet()); } String getName() const override diff --git a/src/AggregateFunctions/AggregateFunctionQuantile.h b/src/AggregateFunctions/AggregateFunctionQuantile.h index 127dc06b642..423fd4bc569 100644 --- a/src/AggregateFunctions/AggregateFunctionQuantile.h +++ b/src/AggregateFunctions/AggregateFunctionQuantile.h @@ -117,7 +117,7 @@ public: throw Exception( ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} requires relative accuracy parameter with Float64 type", getName()); - relative_accuracy = relative_accuracy_field.get(); + relative_accuracy = relative_accuracy_field.safeGet(); if (relative_accuracy <= 0 || relative_accuracy >= 1 || isNaN(relative_accuracy)) throw Exception( @@ -147,9 +147,9 @@ public: ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Aggregate function {} requires accuracy parameter with integer type", getName()); if (accuracy_field.getType() == Field::Types::Int64) - accuracy = accuracy_field.get(); + accuracy = accuracy_field.safeGet(); else - accuracy = accuracy_field.get(); + accuracy = accuracy_field.safeGet(); if (accuracy <= 0) throw Exception( diff --git a/src/AggregateFunctions/AggregateFunctionSumMap.cpp b/src/AggregateFunctions/AggregateFunctionSumMap.cpp index 666136a91b7..9a94c3dfe1a 100644 --- a/src/AggregateFunctions/AggregateFunctionSumMap.cpp +++ b/src/AggregateFunctions/AggregateFunctionSumMap.cpp @@ -300,12 +300,12 @@ public: /// Compatibility with previous versions. 
if (value.getType() == Field::Types::Decimal32) { - auto source = value.get>(); + auto source = value.safeGet>(); value = DecimalField(source.getValue(), source.getScale()); } else if (value.getType() == Field::Types::Decimal64) { - auto source = value.get>(); + auto source = value.safeGet>(); value = DecimalField(source.getValue(), source.getScale()); } @@ -355,7 +355,7 @@ public: /// Compatibility with previous versions. if (value.getType() == Field::Types::Decimal128) { - auto source = value.get>(); + auto source = value.safeGet>(); WhichDataType value_type(values_types[col_idx]); if (value_type.isDecimal32()) { @@ -560,7 +560,7 @@ private: template bool compareImpl(FieldType & x) const { - auto val = rhs.get(); + auto val = rhs.safeGet(); if (val > x) { x = val; @@ -600,7 +600,7 @@ private: template bool compareImpl(FieldType & x) const { - auto val = rhs.get(); + auto val = rhs.safeGet(); if (val < x) { x = val; diff --git a/src/AggregateFunctions/fuzzers/CMakeLists.txt b/src/AggregateFunctions/fuzzers/CMakeLists.txt index 3ff7c86a00c..1ce0c52feb7 100644 --- a/src/AggregateFunctions/fuzzers/CMakeLists.txt +++ b/src/AggregateFunctions/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(aggregate_function_state_deserialization_fuzzer aggregate_function_state_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions) +target_link_libraries(aggregate_function_state_deserialization_fuzzer PRIVATE clickhouse_functions clickhouse_aggregate_functions) diff --git a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp index 76dc8ab94b4..f31920f8e33 100644 --- a/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp +++ b/src/Analyzer/Passes/ComparisonTupleEliminationPass.cpp @@ -137,7 +137,7 @@ private: if (constant_node_value.getType() != Field::Types::Which::Tuple) return {}; - const auto & constant_tuple = constant_node_value.get(); + const auto & constant_tuple = constant_node_value.safeGet(); const auto & function_arguments_nodes = function_node_typed.getArguments().getNodes(); size_t function_arguments_nodes_size = function_arguments_nodes.size(); diff --git a/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp b/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp index 2b2ac95d7b9..6c4ce789993 100644 --- a/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp +++ b/src/Analyzer/Passes/ConvertOrLikeChainPass.cpp @@ -89,7 +89,7 @@ public: if (!pattern || !isString(pattern->getResultType())) continue; - auto regexp = likePatternToRegexp(pattern->getValue().get()); + auto regexp = likePatternToRegexp(pattern->getValue().safeGet()); /// Case insensitive. Works with UTF-8 as well. 
if (is_ilike) regexp = "(?i)" + regexp; diff --git a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp index b968f43c6a6..1fc3eec6833 100644 --- a/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp +++ b/src/Analyzer/Passes/FunctionToSubcolumnsPass.cpp @@ -68,10 +68,10 @@ void optimizeFunctionEmpty(QueryTreeNodePtr &, FunctionNode & function_node, Col String getSubcolumnNameForElement(const Field & value, const DataTypeTuple & data_type_tuple) { if (value.getType() == Field::Types::String) - return value.get(); + return value.safeGet(); if (value.getType() == Field::Types::UInt64) - return data_type_tuple.getNameByPosition(value.get()); + return data_type_tuple.getNameByPosition(value.safeGet()); return ""; } @@ -79,7 +79,7 @@ String getSubcolumnNameForElement(const Field & value, const DataTypeTuple & dat String getSubcolumnNameForElement(const Field & value, const DataTypeVariant &) { if (value.getType() == Field::Types::String) - return value.get(); + return value.safeGet(); return ""; } diff --git a/src/Analyzer/Passes/FuseFunctionsPass.cpp b/src/Analyzer/Passes/FuseFunctionsPass.cpp index 0175e304a2b..f3b109a10ed 100644 --- a/src/Analyzer/Passes/FuseFunctionsPass.cpp +++ b/src/Analyzer/Passes/FuseFunctionsPass.cpp @@ -187,7 +187,7 @@ FunctionNodePtr createFusedQuantilesNode(std::vector & nodes /// Sort nodes and parameters in ascending order of quantile level std::vector permutation(nodes.size()); iota(permutation.data(), permutation.size(), size_t(0)); - std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].get() < parameters[j].get(); }); + std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].safeGet() < parameters[j].safeGet(); }); std::vector new_nodes; new_nodes.reserve(permutation.size()); diff --git a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp index d966f129d08..f81327c5d55 100644 --- a/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp +++ b/src/Analyzer/Passes/IfTransformStringsToEnumPass.cpp @@ -134,8 +134,8 @@ public: return; std::set string_values; - string_values.insert(first_literal->getValue().get()); - string_values.insert(second_literal->getValue().get()); + string_values.insert(first_literal->getValue().safeGet()); + string_values.insert(second_literal->getValue().safeGet()); changeIfArguments(*function_if_node, string_values, context); wrapIntoToString(*function_node, std::move(modified_if_node), context); @@ -163,7 +163,7 @@ public: if (!isArray(literal_to->getResultType()) || !isString(literal_default->getResultType())) return; - auto array_to = literal_to->getValue().get(); + auto array_to = literal_to->getValue().safeGet(); if (array_to.empty()) return; @@ -178,9 +178,9 @@ public: std::set string_values; for (const auto & value : array_to) - string_values.insert(value.get()); + string_values.insert(value.safeGet()); - string_values.insert(literal_default->getValue().get()); + string_values.insert(literal_default->getValue().safeGet()); changeTransformArguments(*function_modified_transform_node, string_values, context); wrapIntoToString(*function_node, std::move(modified_transform_node), context); diff --git a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp index 3a8b6e75d40..02f1c93ea7f 100644 --- a/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp +++ b/src/Analyzer/Passes/NormalizeCountVariantsPass.cpp @@ 
-54,7 +54,7 @@ public: } else if (function_node->getFunctionName() == "sum" && first_argument_constant_literal.getType() == Field::Types::UInt64 && - first_argument_constant_literal.get() == 1) + first_argument_constant_literal.safeGet() == 1) { function_node->getArguments().getNodes().clear(); resolveAggregateFunctionNodeByName(*function_node, "count"); diff --git a/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp b/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp index feb8bcc792d..0f33c302265 100644 --- a/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp +++ b/src/Analyzer/Passes/OptimizeDateOrDateTimeConverterWithPreimagePass.cpp @@ -143,13 +143,13 @@ private: const auto & column_type = column_node_typed.getColumnType().get(); if (isDateOrDate32(column_type)) { - start_date_or_date_time = date_lut.dateToString(range.first.get()); - end_date_or_date_time = date_lut.dateToString(range.second.get()); + start_date_or_date_time = date_lut.dateToString(range.first.safeGet()); + end_date_or_date_time = date_lut.dateToString(range.second.safeGet()); } else if (isDateTime(column_type) || isDateTime64(column_type)) { - start_date_or_date_time = date_lut.timeToString(range.first.get()); - end_date_or_date_time = date_lut.timeToString(range.second.get()); + start_date_or_date_time = date_lut.timeToString(range.first.safeGet()); + end_date_or_date_time = date_lut.timeToString(range.second.safeGet()); } else [[unlikely]] return {}; diff --git a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp index a48e88132a6..091061ceb81 100644 --- a/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp +++ b/src/Analyzer/Passes/RewriteAggregateFunctionWithIfPass.cpp @@ -60,7 +60,7 @@ public: { const auto & second_const_value = second_const_node->getValue(); if (second_const_value.isNull() - || (lower_name == "sum" && isInt64OrUInt64FieldType(second_const_value.getType()) && second_const_value.get() == 0 + || (lower_name == "sum" && isInt64OrUInt64FieldType(second_const_value.getType()) && second_const_value.safeGet() == 0 && !if_node->getResultType()->isNullable())) { /// avg(if(cond, a, null)) -> avgIf(a::ResultTypeIf, cond) @@ -89,7 +89,7 @@ public: { const auto & first_const_value = first_const_node->getValue(); if (first_const_value.isNull() - || (lower_name == "sum" && isInt64OrUInt64FieldType(first_const_value.getType()) && first_const_value.get() == 0 + || (lower_name == "sum" && isInt64OrUInt64FieldType(first_const_value.getType()) && first_const_value.safeGet() == 0 && !if_node->getResultType()->isNullable())) { /// avg(if(cond, null, a) -> avgIf(a::ResultTypeIf, !cond)) diff --git a/src/Analyzer/Passes/SumIfToCountIfPass.cpp b/src/Analyzer/Passes/SumIfToCountIfPass.cpp index 1524629dc81..a987ced497a 100644 --- a/src/Analyzer/Passes/SumIfToCountIfPass.cpp +++ b/src/Analyzer/Passes/SumIfToCountIfPass.cpp @@ -66,7 +66,7 @@ public: resolveAggregateFunctionNodeByName(*function_node, "countIf"); - if (constant_value_literal.get() != 1) + if (constant_value_literal.safeGet() != 1) { /// Rewrite `sumIf(123, cond)` into `123 * countIf(cond)` node = getMultiplyFunction(std::move(multiplier_node), node); @@ -105,8 +105,8 @@ public: const auto & if_true_condition_constant_value_literal = if_true_condition_constant_node->getValue(); const auto & if_false_condition_constant_value_literal = if_false_condition_constant_node->getValue(); - auto if_true_condition_value = 
if_true_condition_constant_value_literal.get(); - auto if_false_condition_value = if_false_condition_constant_value_literal.get(); + auto if_true_condition_value = if_true_condition_constant_value_literal.safeGet(); + auto if_false_condition_value = if_false_condition_constant_value_literal.safeGet(); if (if_false_condition_value == 0) { diff --git a/src/Analyzer/QueryTreeBuilder.cpp b/src/Analyzer/QueryTreeBuilder.cpp index fb41826929f..9754897d54d 100644 --- a/src/Analyzer/QueryTreeBuilder.cpp +++ b/src/Analyzer/QueryTreeBuilder.cpp @@ -471,7 +471,7 @@ QueryTreeNodePtr QueryTreeBuilder::buildSortList(const ASTPtr & order_by_express std::shared_ptr collator; if (order_by_element.getCollation()) - collator = std::make_shared(order_by_element.getCollation()->as().value.get()); + collator = std::make_shared(order_by_element.getCollation()->as().value.safeGet()); const auto & sort_expression_ast = order_by_element.children.at(0); auto sort_expression = buildExpression(sort_expression_ast, context); diff --git a/src/Analyzer/Resolve/IdentifierResolver.cpp b/src/Analyzer/Resolve/IdentifierResolver.cpp index 447bf825836..a79433ac130 100644 --- a/src/Analyzer/Resolve/IdentifierResolver.cpp +++ b/src/Analyzer/Resolve/IdentifierResolver.cpp @@ -1273,7 +1273,7 @@ QueryTreeNodePtr IdentifierResolver::matchArrayJoinSubcolumns( const auto & constant_node_value = constant_node.getValue(); if (constant_node_value.getType() == Field::Types::String) { - array_join_subcolumn_prefix = constant_node_value.get() + "."; + array_join_subcolumn_prefix = constant_node_value.safeGet() + "."; array_join_parent_column = argument_nodes.at(0).get(); } } @@ -1287,7 +1287,7 @@ QueryTreeNodePtr IdentifierResolver::matchArrayJoinSubcolumns( if (!second_argument || second_argument->getValue().getType() != Field::Types::String) throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected constant string as second argument of getSubcolumn function {}", resolved_function->dumpTree()); - const auto & resolved_subcolumn_path = second_argument->getValue().get(); + const auto & resolved_subcolumn_path = second_argument->getValue().safeGet(); if (!startsWith(resolved_subcolumn_path, array_join_subcolumn_prefix)) return {}; @@ -1331,7 +1331,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveExpressionFromArrayJoinExpression size_t nested_function_arguments_size = nested_function_arguments.size(); const auto & nested_keys_names_constant_node = nested_function_arguments[0]->as(); - const auto & nested_keys_names = nested_keys_names_constant_node.getValue().get(); + const auto & nested_keys_names = nested_keys_names_constant_node.getValue().safeGet(); size_t nested_keys_names_size = nested_keys_names.size(); if (nested_keys_names_size == nested_function_arguments_size - 1) @@ -1344,7 +1344,7 @@ QueryTreeNodePtr IdentifierResolver::tryResolveExpressionFromArrayJoinExpression auto array_join_column = std::make_shared(array_join_column_expression_typed.getColumn(), array_join_column_expression_typed.getColumnSource()); - const auto & nested_key_name = nested_keys_names[i - 1].get(); + const auto & nested_key_name = nested_keys_names[i - 1].safeGet(); Identifier nested_identifier = Identifier(nested_key_name); array_join_resolved_expression = wrapExpressionNodeInTupleElement(array_join_column, nested_identifier, scope.context); break; diff --git a/src/Analyzer/Resolve/QueryAnalyzer.cpp b/src/Analyzer/Resolve/QueryAnalyzer.cpp index e04b93314c5..2e8acf2f201 100644 --- a/src/Analyzer/Resolve/QueryAnalyzer.cpp +++ 
b/src/Analyzer/Resolve/QueryAnalyzer.cpp @@ -748,11 +748,11 @@ void QueryAnalyzer::replaceNodesWithPositionalArguments(QueryTreeNodePtr & node_ UInt64 pos; if (constant_node->getValue().getType() == Field::Types::UInt64) { - pos = constant_node->getValue().get(); + pos = constant_node->getValue().safeGet(); } else // Int64 { - auto value = constant_node->getValue().get(); + auto value = constant_node->getValue().safeGet(); if (value > 0) pos = value; else diff --git a/src/Analyzer/SetUtils.cpp b/src/Analyzer/SetUtils.cpp index e7d1a22f45a..59a243b27f3 100644 --- a/src/Analyzer/SetUtils.cpp +++ b/src/Analyzer/SetUtils.cpp @@ -99,7 +99,7 @@ Block createBlockFromCollection(const Collection & collection, const DataTypes& "Invalid type in set. Expected tuple, got {}", value.getTypeName()); - const auto & tuple = value.template get(); + const auto & tuple = value.template safeGet(); const DataTypePtr & value_type = value_types[collection_index]; const DataTypes & tuple_value_type = typeid_cast(value_type.get())->getElements(); @@ -175,15 +175,15 @@ Block getSetElementsForConstantValue(const DataTypePtr & expression_type, const if (rhs_which_type.isArray()) { const DataTypeArray * value_array_type = assert_cast(value_type.get()); - size_t value_array_size = value.get().size(); + size_t value_array_size = value.safeGet().size(); DataTypes value_types(value_array_size, value_array_type->getNestedType()); - result_block = createBlockFromCollection(value.get(), value_types, set_element_types, transform_null_in); + result_block = createBlockFromCollection(value.safeGet(), value_types, set_element_types, transform_null_in); } else if (rhs_which_type.isTuple()) { const DataTypeTuple * value_tuple_type = assert_cast(value_type.get()); const DataTypes & value_types = value_tuple_type->getElements(); - result_block = createBlockFromCollection(value.get(), value_types, set_element_types, transform_null_in); + result_block = createBlockFromCollection(value.safeGet(), value_types, set_element_types, transform_null_in); } else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, diff --git a/src/Backups/BackupSettings.cpp b/src/Backups/BackupSettings.cpp index 37ddd344001..e982a806b7c 100644 --- a/src/Backups/BackupSettings.cpp +++ b/src/Backups/BackupSettings.cpp @@ -126,7 +126,7 @@ std::vector BackupSettings::Util::clusterHostIDsFromAST(const IAST & as throw Exception( ErrorCodes::CANNOT_PARSE_BACKUP_SETTINGS, "Setting cluster_host_ids has wrong format, must be array of arrays of string literals"); - const auto & replicas = array_of_replicas->value.get(); + const auto & replicas = array_of_replicas->value.safeGet(); res[i].resize(replicas.size()); for (size_t j = 0; j != replicas.size(); ++j) { @@ -135,7 +135,7 @@ std::vector BackupSettings::Util::clusterHostIDsFromAST(const IAST & as throw Exception( ErrorCodes::CANNOT_PARSE_BACKUP_SETTINGS, "Setting cluster_host_ids has wrong format, must be array of arrays of string literals"); - res[i][j] = replica.get(); + res[i][j] = replica.safeGet(); } } } diff --git a/src/Backups/BackupsWorker.cpp b/src/Backups/BackupsWorker.cpp index 0b93ae6d547..8b45c816817 100644 --- a/src/Backups/BackupsWorker.cpp +++ b/src/Backups/BackupsWorker.cpp @@ -490,6 +490,8 @@ OperationID BackupsWorker::startMakingBackup(const ASTPtr & query, const Context /// process_list_element_holder is used to make an element in ProcessList live while BACKUP is working asynchronously. 
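The get() -> safeGet() replacements above (and throughout the rest of this patch) all follow the same pattern. A minimal sketch of the assumed difference, with hypothetical values: safeGet<T>() verifies the type actually stored in the Field and throws on a mismatch, while get<T>() trusts the caller about the stored type.

    Field f = UInt64(1);
    UInt64 n = f.safeGet<UInt64>();   /// ok: the stored type matches the requested one
    /// auto s = f.safeGet<String>(); /// would throw instead of reinterpreting the stored data
    /// auto s = f.get<String>();     /// unchecked access - the behaviour the patch moves away from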
auto process_list_element = context_in_use->getProcessListElement(); + /// Update context to preserve query information in processlist (settings, current_database) + process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, @@ -853,6 +855,8 @@ OperationID BackupsWorker::startRestoring(const ASTPtr & query, ContextMutablePt /// process_list_element_holder is used to make an element in ProcessList live while RESTORE is working asynchronously. auto process_list_element = context_in_use->getProcessListElement(); + /// Update context to preserve query information in processlist (settings, current_database) + process_list_element->updateContext(context_in_use); thread_pool.scheduleOrThrowOnError( [this, diff --git a/src/Backups/DDLAdjustingForBackupVisitor.cpp b/src/Backups/DDLAdjustingForBackupVisitor.cpp index 910831195a3..4dcbdcc1617 100644 --- a/src/Backups/DDLAdjustingForBackupVisitor.cpp +++ b/src/Backups/DDLAdjustingForBackupVisitor.cpp @@ -46,8 +46,8 @@ namespace if (zookeeper_path_ast && (zookeeper_path_ast->value.getType() == Field::Types::String) && replica_name_ast && (replica_name_ast->value.getType() == Field::Types::String)) { - String & zookeeper_path_arg = zookeeper_path_ast->value.get(); - String & replica_name_arg = replica_name_ast->value.get(); + String & zookeeper_path_arg = zookeeper_path_ast->value.safeGet(); + String & replica_name_arg = replica_name_ast->value.safeGet(); if (create.uuid != UUIDHelpers::Nil) { String table_uuid_str = toString(create.uuid); diff --git a/src/Backups/RestoreSettings.cpp b/src/Backups/RestoreSettings.cpp index a974fc11d00..8e60e8d129e 100644 --- a/src/Backups/RestoreSettings.cpp +++ b/src/Backups/RestoreSettings.cpp @@ -31,7 +31,7 @@ namespace { if (field.getType() == Field::Types::String) { - const String & str = field.get(); + const String & str = field.safeGet(); if (str == "1" || boost::iequals(str, "true") || boost::iequals(str, "create")) { value = RestoreTableCreationMode::kCreate; @@ -54,7 +54,7 @@ namespace if (field.getType() == Field::Types::UInt64) { - UInt64 number = field.get(); + UInt64 number = field.safeGet(); if (number == 1) { value = RestoreTableCreationMode::kCreate; @@ -95,7 +95,7 @@ namespace { if (field.getType() == Field::Types::String) { - const String & str = field.get(); + const String & str = field.safeGet(); if (str == "1" || boost::iequals(str, "true") || boost::iequals(str, "create")) { value = RestoreAccessCreationMode::kCreate; @@ -118,7 +118,7 @@ namespace if (field.getType() == Field::Types::UInt64) { - UInt64 number = field.get(); + UInt64 number = field.safeGet(); if (number == 1) { value = RestoreAccessCreationMode::kCreate; diff --git a/src/Backups/SettingsFieldOptionalString.cpp b/src/Backups/SettingsFieldOptionalString.cpp index 573fd1e052c..684407a533d 100644 --- a/src/Backups/SettingsFieldOptionalString.cpp +++ b/src/Backups/SettingsFieldOptionalString.cpp @@ -19,7 +19,7 @@ SettingFieldOptionalString::SettingFieldOptionalString(const Field & field) if (field.getType() == Field::Types::String) { - value = field.get(); + value = field.safeGet(); return; } diff --git a/src/Backups/SettingsFieldOptionalUUID.cpp b/src/Backups/SettingsFieldOptionalUUID.cpp index 3f14608b206..0011f7f1073 100644 --- a/src/Backups/SettingsFieldOptionalUUID.cpp +++ b/src/Backups/SettingsFieldOptionalUUID.cpp @@ -22,7 +22,7 @@ namespace ErrorCodes if (field.getType() == Field::Types::String) { - const String & str = field.get(); + const String & str = field.safeGet(); if (str.empty()) { 
value = std::nullopt; diff --git a/src/Backups/WithRetries.cpp b/src/Backups/WithRetries.cpp index 181e6331ac9..9f22085f5a9 100644 --- a/src/Backups/WithRetries.cpp +++ b/src/Backups/WithRetries.cpp @@ -68,13 +68,19 @@ const WithRetries::KeeperSettings & WithRetries::getKeeperSettings() const WithRetries::FaultyKeeper WithRetries::getFaultyZooKeeper() const { - /// We need to create new instance of ZooKeeperWithFaultInjection each time a copy a pointer to ZooKeeper client there + zkutil::ZooKeeperPtr current_zookeeper; + { + std::lock_guard lock(zookeeper_mutex); + current_zookeeper = zookeeper; + } + + /// We need to create new instance of ZooKeeperWithFaultInjection each time and copy a pointer to ZooKeeper client there /// The reason is that ZooKeeperWithFaultInjection may reset the underlying pointer and there could be a race condition /// when the same object is used from multiple threads. auto faulty_zookeeper = ZooKeeperWithFaultInjection::createInstance( settings.keeper_fault_injection_probability, settings.keeper_fault_injection_seed, - zookeeper, + current_zookeeper, log->name(), log); diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 98dd0601a1b..95e431b54be 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,4 +1,4 @@ -add_compile_options($<$,$>:${COVERAGE_FLAGS}>) +add_compile_options("$<$,$>:${COVERAGE_FLAGS}>") if (USE_INCLUDE_WHAT_YOU_USE) set (CMAKE_CXX_INCLUDE_WHAT_YOU_USE ${IWYU_PATH}) @@ -353,8 +353,8 @@ target_link_libraries(clickhouse_common_io Poco::Foundation ) -if (TARGET ch_contrib::fiu) - target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::fiu) +if (TARGET ch_contrib::libfiu) + target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::libfiu) endif() if (TARGET ch_contrib::cpuid) @@ -556,14 +556,13 @@ target_link_libraries (clickhouse_common_io PRIVATE ch_contrib::lz4) if (TARGET ch_contrib::qpl) dbms_target_link_libraries(PUBLIC ch_contrib::qpl) + target_link_libraries (clickhouse_compression PUBLIC ch_contrib::qpl) + target_link_libraries (clickhouse_compression PUBLIC ch_contrib::accel-config) endif () -if (TARGET ch_contrib::accel-config) - dbms_target_link_libraries(PUBLIC ch_contrib::accel-config) -endif () - -if (TARGET ch_contrib::qatzstd_plugin) +if (TARGET ch_contrib::accel-config AND TARGET ch_contrib::qatzstd_plugin) dbms_target_link_libraries(PUBLIC ch_contrib::qatzstd_plugin) + dbms_target_link_libraries(PUBLIC ch_contrib::accel-config) target_link_libraries(clickhouse_common_io PUBLIC ch_contrib::qatzstd_plugin) endif () @@ -602,10 +601,6 @@ endif() dbms_target_link_libraries(PUBLIC ch_contrib::consistent_hashing) -if (TARGET ch_contrib::annoy) - dbms_target_link_libraries(PUBLIC ch_contrib::annoy) -endif() - if (TARGET ch_contrib::usearch) dbms_target_link_libraries(PUBLIC ch_contrib::usearch) endif() diff --git a/src/Client/ClientApplicationBase.cpp b/src/Client/ClientApplicationBase.cpp index 9f133616d2e..0649aa5f5d7 100644 --- a/src/Client/ClientApplicationBase.cpp +++ b/src/Client/ClientApplicationBase.cpp @@ -158,6 +158,8 @@ void ClientApplicationBase::init(int argc, char ** argv) ("config-file,C", po::value(), "config-file path") + ("proto_caps", po::value(), "enable/disable chunked protocol: chunked_optional, notchunked, notchunked_optional, send_chunked, send_chunked_optional, send_notchunked, send_notchunked_optional, recv_chunked, recv_chunked_optional, recv_notchunked, recv_notchunked_optional") + ("query,q", po::value>()->multitoken(), R"(Query. 
Can be specified multiple times (--query "SELECT 1" --query "SELECT 2") or once with multiple semicolon-separated queries (--query "SELECT 1; SELECT 2;"). In the latter case, INSERT queries with non-VALUE format must be separated by empty lines.)") ("queries-file", po::value>()->multitoken(), "file path with queries to execute; multiple files can be specified (--queries-file file1 file2...)") ("multiquery,n", "Obsolete, does nothing") @@ -200,8 +202,6 @@ void ClientApplicationBase::init(int argc, char ** argv) ("pager", po::value(), "Pipe all output into this command (less or similar)") ("max_memory_usage_in_client", po::value(), "Set memory limit in client/local server") - ("fuzzer-args", po::value(), "Command line arguments for the LLVM's libFuzzer driver. Only relevant if the application is compiled with libFuzzer.") - ("client_logs_file", po::value(), "Path to a file for writing client logs. Currently we only have fatal logs (when the client crashes)") ; @@ -339,6 +339,41 @@ void ClientApplicationBase::init(int argc, char ** argv) if (options.count("server_logs_file")) server_logs_file = options["server_logs_file"].as(); + if (options.count("proto_caps")) + { + std::string proto_caps_str = options["proto_caps"].as(); + + std::vector proto_caps; + splitInto<','>(proto_caps, proto_caps_str); + + for (auto cap_str : proto_caps) + { + std::string direction; + + if (cap_str.starts_with("send_")) + { + direction = "send"; + cap_str = cap_str.substr(std::string_view("send_").size()); + } + else if (cap_str.starts_with("recv_")) + { + direction = "recv"; + cap_str = cap_str.substr(std::string_view("recv_").size()); + } + + if (cap_str != "chunked" && cap_str != "notchunked" && cap_str != "chunked_optional" && cap_str != "notchunked_optional") + throw Exception(ErrorCodes::BAD_ARGUMENTS, "proto_caps option is incorrect ({})", proto_caps_str); + + if (direction.empty()) + { + config().setString("proto_caps.send", std::string(cap_str)); + config().setString("proto_caps.recv", std::string(cap_str)); + } + else + config().setString("proto_caps."
+ direction, std::string(cap_str)); + } + } + query_processing_stage = QueryProcessingStage::fromString(options["stage"].as()); query_kind = parseQueryKind(options["query_kind"].as()); profile_events.print = options.count("print-profile-events"); diff --git a/src/Client/ClientBase.cpp b/src/Client/ClientBase.cpp index 3f4c1cab31b..187ef079eda 100644 --- a/src/Client/ClientBase.cpp +++ b/src/Client/ClientBase.cpp @@ -73,9 +73,11 @@ #include #include #include +#include #include #include +#include #include "config.h" #include #include @@ -914,6 +916,8 @@ void ClientBase::processTextAsSingleQuery(const String & full_query) } catch (Exception & e) { + if (server_exception) + server_exception->rethrow(); if (!is_interactive) e.addMessage("(in query: {})", full_query); throw; @@ -1032,19 +1036,28 @@ void ClientBase::processOrdinaryQuery(const String & query_to_execute, ASTPtr pa query_interrupt_handler.start(signals_before_stop); SCOPE_EXIT({ query_interrupt_handler.stop(); }); - connection->sendQuery( - connection_parameters.timeouts, - query, - query_parameters, - client_context->getCurrentQueryId(), - query_processing_stage, - &client_context->getSettingsRef(), - &client_context->getClientInfo(), - true, - [&](const Progress & progress) { onProgress(progress); }); + try { + connection->sendQuery( + connection_parameters.timeouts, + query, + query_parameters, + client_context->getCurrentQueryId(), + query_processing_stage, + &client_context->getSettingsRef(), + &client_context->getClientInfo(), + true, + [&](const Progress & progress) { onProgress(progress); }); + + if (send_external_tables) + sendExternalTables(parsed_query); + } + catch (const NetException &) + { + // We still want to attempt to process whatever we already received or can receive (socket receive buffer can be not empty) + receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); + throw; + } - if (send_external_tables) - sendExternalTables(parsed_query); receiveResult(parsed_query, signals_before_stop, settings.partial_result_on_first_cancel); break; @@ -2751,7 +2764,7 @@ void ClientBase::runLibFuzzer() for (auto & arg : fuzzer_args_holder) fuzzer_args.emplace_back(arg.data()); - int fuzzer_argc = fuzzer_args.size(); + int fuzzer_argc = static_cast(fuzzer_args.size()); char ** fuzzer_argv = fuzzer_args.data(); LLVMFuzzerRunDriver(&fuzzer_argc, &fuzzer_argv, [](const uint8_t * data, size_t size) diff --git a/src/Client/Connection.cpp b/src/Client/Connection.cpp index 07f4bf19f05..e89bd7a2bf5 100644 --- a/src/Client/Connection.cpp +++ b/src/Client/Connection.cpp @@ -5,8 +5,6 @@ #include #include #include -#include -#include #include #include #include @@ -85,6 +83,7 @@ Connection::~Connection() Connection::Connection(const String & host_, UInt16 port_, const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, const String & proto_recv_chunked_, [[maybe_unused]] const SSHKey & ssh_private_key_, const String & jwt_, const String & quota_key_, @@ -95,6 +94,7 @@ Connection::Connection(const String & host_, UInt16 port_, Protocol::Secure secure_) : host(host_), port(port_), default_database(default_database_) , user(user_), password(password_) + , proto_send_chunked(proto_send_chunked_), proto_recv_chunked(proto_recv_chunked_) #if USE_SSH , ssh_private_key(ssh_private_key_) #endif @@ -211,10 +211,10 @@ void Connection::connect(const ConnectionTimeouts & timeouts) , tcp_keep_alive_timeout_in_sec); } - in = std::make_shared(*socket); + in = 
std::make_shared(*socket); in->setAsyncCallback(async_callback); - out = std::make_shared(*socket); + out = std::make_shared(*socket); out->setAsyncCallback(async_callback); connected = true; setDescription(); @@ -222,9 +222,61 @@ void Connection::connect(const ConnectionTimeouts & timeouts) sendHello(); receiveHello(timeouts.handshake_timeout); + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + /// Client side of chunked protocol negotiation. + /// The server advertises its protocol capabilities (separately for the send and receive channels) by sending + /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional. + /// Non-optional types are strict, meaning that the server supports only this type; optional means that + /// the server prefers this type but is able to work with the opposite one. + /// The client selects which type it is going to use for communication based on the settings from config or arguments, + /// and sends either a "chunked" or "notchunked" protocol request in the addendum section of the handshake. + /// The client can detect that the server's protocol capabilities are incompatible with the client's settings (for example, + /// the server strictly requires the chunked protocol while the client's settings allow only notchunked) - in such a case + /// the client should interrupt the connection. However, if the client continues with an incompatible protocol type request, + /// the server will send an appropriate exception and disconnect the client. + + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) + { + bool chunked_srv = chunked_srv_str.starts_with("chunked"); + bool optional_srv = chunked_srv_str.ends_with("_optional"); + bool chunked_cl = chunked_cl_str.starts_with("chunked"); + bool optional_cl = chunked_cl_str.ends_with("_optional"); + + if (optional_srv) + return chunked_cl; + if (optional_cl) + return chunked_srv; + if (chunked_cl != chunked_srv) + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: {} set to {}, server requires {}", + direction, + chunked_cl ? "chunked" : "notchunked", + chunked_srv ? "chunked" : "notchunked"); + + return chunked_srv; + }; + + proto_send_chunked = is_chunked(proto_recv_chunked_srv, proto_send_chunked, "send") ? "chunked" : "notchunked"; + proto_recv_chunked = is_chunked(proto_send_chunked_srv, proto_recv_chunked, "recv") ?
"chunked" : "notchunked"; + } + else + { + if (proto_send_chunked == "chunked" || proto_recv_chunked == "chunked") + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: server's version is too old and doesn't support chunked protocol while client settings require it."); + } + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) sendAddendum(); + if (proto_send_chunked == "chunked") + out->enableChunked(); + if (proto_recv_chunked == "chunked") + in->enableChunked(); + LOG_TRACE(log_wrapper.get(), "Connected to {} server version {}.{}.{}.", server_name, server_version_major, server_version_minor, server_version_patch); } @@ -393,6 +445,13 @@ void Connection::sendAddendum() { if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY) writeStringBinary(quota_key, *out); + + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + writeStringBinary(proto_send_chunked, *out); + writeStringBinary(proto_recv_chunked, *out); + } + out->next(); } @@ -472,6 +531,12 @@ void Connection::receiveHello(const Poco::Timespan & handshake_timeout) else server_version_patch = server_revision; + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + readStringBinary(proto_send_chunked_srv, *in); + readStringBinary(proto_recv_chunked_srv, *in); + } + if (server_revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { UInt64 rules_size; @@ -611,6 +676,7 @@ bool Connection::ping(const ConnectionTimeouts & timeouts) UInt64 pong = 0; writeVarUInt(Protocol::Client::Ping, *out); + out->finishChunk(); out->next(); if (in->eof()) @@ -660,6 +726,7 @@ TablesStatusResponse Connection::getTablesStatus(const ConnectionTimeouts & time writeVarUInt(Protocol::Client::TablesStatusRequest, *out); request.write(*out, server_revision); + out->finishChunk(); out->next(); UInt64 response_type = 0; @@ -813,6 +880,8 @@ void Connection::sendQuery( block_profile_events_in.reset(); block_out.reset(); + out->finishChunk(); + /// Send empty block which means end of data. 
if (!with_pending_data) { @@ -829,6 +898,7 @@ void Connection::sendCancel() return; writeVarUInt(Protocol::Client::Cancel, *out); + out->finishChunk(); out->next(); } @@ -854,7 +924,10 @@ void Connection::sendData(const Block & block, const String & name, bool scalar) size_t prev_bytes = out->count(); block_out->write(block); - maybe_compressed_out->next(); + if (maybe_compressed_out != out) + maybe_compressed_out->next(); + if (!block) + out->finishChunk(); out->next(); if (throttler) @@ -865,6 +938,7 @@ void Connection::sendIgnoredPartUUIDs(const std::vector & uuids) { writeVarUInt(Protocol::Client::IgnoredPartUUIDs, *out); writeVectorBinary(uuids, *out); + out->finishChunk(); out->next(); } @@ -874,6 +948,7 @@ void Connection::sendReadTaskResponse(const String & response) writeVarUInt(Protocol::Client::ReadTaskResponse, *out); writeVarUInt(DBMS_CLUSTER_PROCESSING_PROTOCOL_VERSION, *out); writeStringBinary(response, *out); + out->finishChunk(); out->next(); } @@ -882,6 +957,7 @@ void Connection::sendMergeTreeReadTaskResponse(const ParallelReadResponse & resp { writeVarUInt(Protocol::Client::MergeTreeReadTaskResponse, *out); response.serialize(*out); + out->finishChunk(); out->next(); } @@ -899,6 +975,8 @@ void Connection::sendPreparedData(ReadBuffer & input, size_t size, const String copyData(input, *out); else copyData(input, *out, size); + + out->finishChunk(); out->next(); } @@ -927,6 +1005,8 @@ void Connection::sendScalarsData(Scalars & data) sendData(elem.second, elem.first, true /* scalar */); } + out->finishChunk(); + out_bytes = out->count() - out_bytes; maybe_compressed_out_bytes = maybe_compressed_out->count() - maybe_compressed_out_bytes; double elapsed = watch.elapsedSeconds(); @@ -1069,13 +1149,13 @@ std::optional Connection::getResolvedAddress() const bool Connection::poll(size_t timeout_microseconds) { - return static_cast(*in).poll(timeout_microseconds); + return in->poll(timeout_microseconds); } bool Connection::hasReadPendingData() const { - return last_input_packet_type.has_value() || static_cast(*in).hasPendingData(); + return last_input_packet_type.has_value() || in->hasBufferedData(); } @@ -1349,6 +1429,8 @@ ServerConnectionPtr Connection::createConnection(const ConnectionParameters & pa parameters.default_database, parameters.user, parameters.password, + parameters.proto_send_chunked, + parameters.proto_recv_chunked, parameters.ssh_private_key, parameters.jwt, parameters.quota_key, diff --git a/src/Client/Connection.h b/src/Client/Connection.h index 0f4b3e436df..ed84bc51318 100644 --- a/src/Client/Connection.h +++ b/src/Client/Connection.h @@ -8,8 +8,8 @@ #include -#include -#include +#include +#include #include #include @@ -52,6 +52,7 @@ public: Connection(const String & host_, UInt16 port_, const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, const String & proto_recv_chunked_, const SSHKey & ssh_private_key_, const String & jwt_, const String & quota_key_, @@ -170,6 +171,10 @@ private: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; + String proto_send_chunked_srv; + String proto_recv_chunked_srv; #if USE_SSH SSHKey ssh_private_key; #endif @@ -209,8 +214,8 @@ private: String server_display_name; std::unique_ptr socket; - std::shared_ptr in; - std::shared_ptr out; + std::shared_ptr in; + std::shared_ptr out; std::optional last_input_packet_type; String query_id; diff --git a/src/Client/ConnectionParameters.cpp 
b/src/Client/ConnectionParameters.cpp index 303bebc30d2..4d0a9ffa08c 100644 --- a/src/Client/ConnectionParameters.cpp +++ b/src/Client/ConnectionParameters.cpp @@ -107,6 +107,9 @@ ConnectionParameters::ConnectionParameters(const Poco::Util::AbstractConfigurati } } + proto_send_chunked = config.getString("proto_caps.send", "notchunked"); + proto_recv_chunked = config.getString("proto_caps.recv", "notchunked"); + quota_key = config.getString("quota_key", ""); /// By default compression is disabled if address looks like localhost. diff --git a/src/Client/ConnectionParameters.h b/src/Client/ConnectionParameters.h index c305c7813f2..382bfe34a3d 100644 --- a/src/Client/ConnectionParameters.h +++ b/src/Client/ConnectionParameters.h @@ -20,6 +20,8 @@ struct ConnectionParameters std::string default_database; std::string user; std::string password; + std::string proto_send_chunked = "notchunked"; + std::string proto_recv_chunked = "notchunked"; std::string quota_key; SSHKey ssh_private_key; std::string jwt; diff --git a/src/Client/ConnectionPool.cpp b/src/Client/ConnectionPool.cpp index ed2e7c3c725..ab8ad08826c 100644 --- a/src/Client/ConnectionPool.cpp +++ b/src/Client/ConnectionPool.cpp @@ -13,6 +13,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get( String default_database, String user, String password, + String proto_send_chunked, + String proto_recv_chunked, String quota_key, String cluster, String cluster_secret, @@ -22,7 +24,7 @@ ConnectionPoolPtr ConnectionPoolFactory::get( Priority priority) { Key key{ - max_connections, host, port, default_database, user, password, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; + max_connections, host, port, default_database, user, password, proto_send_chunked, proto_recv_chunked, quota_key, cluster, cluster_secret, client_name, compression, secure, priority}; std::lock_guard lock(mutex); auto [it, inserted] = pools.emplace(key, ConnectionPoolPtr{}); @@ -39,6 +41,8 @@ ConnectionPoolPtr ConnectionPoolFactory::get( default_database, user, password, + proto_send_chunked, + proto_recv_chunked, quota_key, cluster, cluster_secret, diff --git a/src/Client/ConnectionPool.h b/src/Client/ConnectionPool.h index 0fcb3c4e7e1..219548b62a0 100644 --- a/src/Client/ConnectionPool.h +++ b/src/Client/ConnectionPool.h @@ -73,6 +73,8 @@ public: const String & default_database_, const String & user_, const String & password_, + const String & proto_send_chunked_, + const String & proto_recv_chunked_, const String & quota_key_, const String & cluster_, const String & cluster_secret_, @@ -85,6 +87,8 @@ public: , default_database(default_database_) , user(user_) , password(password_) + , proto_send_chunked(proto_send_chunked_) + , proto_recv_chunked(proto_recv_chunked_) , quota_key(quota_key_) , cluster(cluster_) , cluster_secret(cluster_secret_) @@ -116,7 +120,9 @@ protected: { return std::make_shared( host, port, - default_database, user, password, SSHKey(), /*jwt*/ "", quota_key, + default_database, user, password, + proto_send_chunked, proto_recv_chunked, + SSHKey(), /*jwt*/ "", quota_key, cluster, cluster_secret, client_name, compression, secure); } @@ -125,6 +131,8 @@ private: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; /// For inter-server authorization @@ -150,6 +158,8 @@ public: String default_database; String user; String password; + String proto_send_chunked; + String proto_recv_chunked; String quota_key; String cluster; String cluster_secret; @@ 
-173,6 +183,8 @@ public: String default_database, String user, String password, + String proto_send_chunked, + String proto_recv_chunked, String quota_key, String cluster, String cluster_secret, @@ -190,6 +202,7 @@ inline bool operator==(const ConnectionPoolFactory::Key & lhs, const ConnectionP { return lhs.max_connections == rhs.max_connections && lhs.host == rhs.host && lhs.port == rhs.port && lhs.default_database == rhs.default_database && lhs.user == rhs.user && lhs.password == rhs.password + && lhs.proto_send_chunked == rhs.proto_send_chunked && lhs.proto_recv_chunked == rhs.proto_recv_chunked && lhs.quota_key == rhs.quota_key && lhs.cluster == rhs.cluster && lhs.cluster_secret == rhs.cluster_secret && lhs.client_name == rhs.client_name && lhs.compression == rhs.compression && lhs.secure == rhs.secure && lhs.priority == rhs.priority; diff --git a/src/Client/Suggest.cpp b/src/Client/Suggest.cpp index 0188ebc8173..affd620f83a 100644 --- a/src/Client/Suggest.cpp +++ b/src/Client/Suggest.cpp @@ -214,7 +214,7 @@ void Suggest::fillWordsFromBlock(const Block & block) Words new_words; new_words.reserve(rows); for (size_t i = 0; i < rows; ++i) - new_words.emplace_back(column[i].get()); + new_words.emplace_back(column[i].safeGet()); addWords(std::move(new_words)); } diff --git a/src/Columns/ColumnAggregateFunction.cpp b/src/Columns/ColumnAggregateFunction.cpp index 4bc48c62eb4..d3363d91a46 100644 --- a/src/Columns/ColumnAggregateFunction.cpp +++ b/src/Columns/ColumnAggregateFunction.cpp @@ -457,9 +457,9 @@ MutableColumnPtr ColumnAggregateFunction::cloneEmpty() const Field ColumnAggregateFunction::operator[](size_t n) const { Field field = AggregateFunctionStateData(); - field.get().name = type_string; + field.safeGet().name = type_string; { - WriteBufferFromString buffer(field.get().data); + WriteBufferFromString buffer(field.safeGet().data); func->serialize(data[n], buffer, version); } return field; @@ -467,12 +467,7 @@ Field ColumnAggregateFunction::operator[](size_t n) const void ColumnAggregateFunction::get(size_t n, Field & res) const { - res = AggregateFunctionStateData(); - res.get().name = type_string; - { - WriteBufferFromString buffer(res.get().data); - func->serialize(data[n], buffer, version); - } + res = operator[](n); } StringRef ColumnAggregateFunction::getDataAt(size_t n) const @@ -552,7 +547,7 @@ void ColumnAggregateFunction::insert(const Field & x) "Inserting field of type {} into ColumnAggregateFunction. 
Expected {}", x.getTypeName(), Field::Types::AggregateFunctionState); - const auto & field_name = x.get().name; + const auto & field_name = x.safeGet().name; if (type_string != field_name) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Cannot insert filed with type {} into column with type {}", field_name, type_string); @@ -560,7 +555,7 @@ void ColumnAggregateFunction::insert(const Field & x) ensureOwnership(); Arena & arena = createOrGetArena(); pushBackAndCreateState(data, arena, func.get()); - ReadBufferFromString read_buffer(x.get().data); + ReadBufferFromString read_buffer(x.safeGet().data); func->deserialize(data.back(), read_buffer, version, &arena); } @@ -569,14 +564,14 @@ bool ColumnAggregateFunction::tryInsert(const DB::Field & x) if (x.getType() != Field::Types::AggregateFunctionState) return false; - const auto & field_name = x.get().name; + const auto & field_name = x.safeGet().name; if (type_string != field_name) return false; ensureOwnership(); Arena & arena = createOrGetArena(); pushBackAndCreateState(data, arena, func.get()); - ReadBufferFromString read_buffer(x.get().data); + ReadBufferFromString read_buffer(x.safeGet().data); func->deserialize(data.back(), read_buffer, version, &arena); return true; } diff --git a/src/Columns/ColumnArray.cpp b/src/Columns/ColumnArray.cpp index 19cce678cc7..26841879474 100644 --- a/src/Columns/ColumnArray.cpp +++ b/src/Columns/ColumnArray.cpp @@ -141,7 +141,7 @@ void ColumnArray::get(size_t n, Field & res) const size, max_array_size_as_field); res = Array(); - Array & res_arr = res.get(); + Array & res_arr = res.safeGet(); res_arr.reserve(size); for (size_t i = 0; i < size; ++i) @@ -309,7 +309,7 @@ void ColumnArray::updateHashFast(SipHash & hash) const void ColumnArray::insert(const Field & x) { - const Array & array = x.get(); + const Array & array = x.safeGet(); size_t size = array.size(); for (size_t i = 0; i < size; ++i) getData().insert(array[i]); @@ -321,7 +321,7 @@ bool ColumnArray::tryInsert(const Field & x) if (x.getType() != Field::Types::Which::Array) return false; - const Array & array = x.get(); + const Array & array = x.safeGet(); size_t size = array.size(); for (size_t i = 0; i < size; ++i) { @@ -452,6 +452,22 @@ void ColumnArray::reserve(size_t n) getData().reserve(n); /// The average size of arrays is not taken into account here. Or it is considered to be no more than 1. 
} +void ColumnArray::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + Columns source_data_columns; + source_data_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + { + const auto & source_array_column = assert_cast(*source_column); + new_size += source_array_column.size(); + source_data_columns.push_back(source_array_column.getDataPtr()); + } + + getOffsets().reserve_exact(new_size); + data->prepareForSquashing(source_data_columns); +} + void ColumnArray::shrinkToFit() { getOffsets().shrink_to_fit(); diff --git a/src/Columns/ColumnArray.h b/src/Columns/ColumnArray.h index 63affb86d9d..d6f71b72940 100644 --- a/src/Columns/ColumnArray.h +++ b/src/Columns/ColumnArray.h @@ -118,6 +118,7 @@ public: void updatePermutationWithCollation(const Collator & collator, PermutationSortDirection direction, PermutationSortStability stability, size_t limit, int nan_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnDecimal.h b/src/Columns/ColumnDecimal.h index 59bfbd2159c..07120f5f035 100644 --- a/src/Columns/ColumnDecimal.h +++ b/src/Columns/ColumnDecimal.h @@ -74,7 +74,7 @@ public: void insertData(const char * src, size_t /*length*/) override; void insertDefault() override { data.push_back(T()); } void insertManyDefaults(size_t length) override { data.resize_fill(data.size() + length); } - void insert(const Field & x) override { data.push_back(x.get()); } + void insert(const Field & x) override { data.push_back(x.safeGet()); } bool tryInsert(const Field & x) override; #if !defined(DEBUG_OR_SANITIZER_BUILD) void insertRangeFrom(const IColumn & src, size_t start, size_t length) override; diff --git a/src/Columns/ColumnDynamic.cpp b/src/Columns/ColumnDynamic.cpp index a92d54dd675..69b4c5dfc4e 100644 --- a/src/Columns/ColumnDynamic.cpp +++ b/src/Columns/ColumnDynamic.cpp @@ -1,16 +1,21 @@ #include #include +#include #include #include #include +#include #include #include #include #include #include #include -#include +#include +#include +#include +#include #include namespace DB @@ -22,31 +27,77 @@ namespace ErrorCodes extern const int PARAMETER_OUT_OF_BOUND; } - -ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_dynamic_types_) +namespace { - /// Create empty Variant. - variant_info.variant_type = std::make_shared(DataTypes{}); - variant_info.variant_name = variant_info.variant_type->getName(); - variant_column = variant_info.variant_type->createColumn(); + +/// Static default format settings to avoid creating it every time. +const FormatSettings & getFormatSettings() +{ + static const FormatSettings settings; + return settings; +} + +} + +/// Shared variant will contain String values but we cannot use usual String type +/// because we can have regular variant with type String. +/// To solve it, we use String type with custom name for shared variant. +DataTypePtr ColumnDynamic::getSharedVariantDataType() +{ + return DataTypeFactory::instance().getCustom("String", std::make_unique(std::make_unique(getSharedVariantTypeName()))); +} + +ColumnDynamic::ColumnDynamic(size_t max_dynamic_types_) : max_dynamic_types(max_dynamic_types_), global_max_dynamic_types(max_dynamic_types) +{ + /// Create Variant with shared variant. 
+ setVariantType(std::make_shared(DataTypes{getSharedVariantDataType()})); } ColumnDynamic::ColumnDynamic( - MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_) + MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) + : variant_column(std::move(variant_column_)) + , max_dynamic_types(max_dynamic_types_) + , global_max_dynamic_types(global_max_dynamic_types_) + , statistics(statistics_) +{ + createVariantInfo(variant_type_); +} + +ColumnDynamic::ColumnDynamic( + MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_) : variant_column(std::move(variant_column_)) , variant_info(variant_info_) , max_dynamic_types(max_dynamic_types_) + , global_max_dynamic_types(global_max_dynamic_types_) , statistics(statistics_) { } -ColumnDynamic::MutablePtr ColumnDynamic::create(MutableColumnPtr variant_column, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_) +void ColumnDynamic::setVariantType(const DataTypePtr & variant_type) +{ + if (variant_column && !empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific variant type is allowed only for empty dynamic column"); + + variant_column = variant_type->createColumn(); + createVariantInfo(variant_type); +} + +void ColumnDynamic::setMaxDynamicPaths(size_t max_dynamic_type_) +{ + if (variant_column && !empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Setting specific max_dynamic_type parameter is allowed only for empty dynamic column"); + + max_dynamic_types = max_dynamic_type_; +} + +void ColumnDynamic::createVariantInfo(const DataTypePtr & variant_type) { - VariantInfo variant_info; variant_info.variant_type = variant_type; variant_info.variant_name = variant_type->getName(); const auto & variants = assert_cast(*variant_type).getVariants(); + variant_info.variant_names.clear(); variant_info.variant_names.reserve(variants.size()); + variant_info.variant_name_to_discriminator.clear(); variant_info.variant_name_to_discriminator.reserve(variants.size()); for (ColumnVariant::Discriminator discr = 0; discr != variants.size(); ++discr) { @@ -54,30 +105,26 @@ ColumnDynamic::MutablePtr ColumnDynamic::create(MutableColumnPtr variant_column, variant_info.variant_name_to_discriminator[variant_name] = discr; } - return create(std::move(variant_column), variant_info, max_dynamic_types_, statistics_); + if (!variant_info.variant_name_to_discriminator.contains(getSharedVariantTypeName())) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Variant in Dynamic column doesn't contain shared variant"); } -bool ColumnDynamic::addNewVariant(const DB::DataTypePtr & new_variant) +bool ColumnDynamic::addNewVariant(const DataTypePtr & new_variant, const String & new_variant_name) { /// Check if we already have such variant. - if (variant_info.variant_name_to_discriminator.contains(new_variant->getName())) + if (variant_info.variant_name_to_discriminator.contains(new_variant_name)) return true; /// Check if we reached maximum number of variants. - if (variant_info.variant_names.size() >= max_dynamic_types) + if (!canAddNewVariant()) { - /// ColumnDynamic can have max_dynamic_types number of variants only when it has String as a variant. - /// Otherwise we won't be able to cast new variants to Strings. 
- if (!variant_info.variant_name_to_discriminator.contains("String")) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Maximum number of variants reached, but no String variant exists"); + /// Dynamic column should always have shared variant. + if (!variant_info.variant_name_to_discriminator.contains(getSharedVariantTypeName())) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Maximum number of variants reached, but no shared variant exists"); return false; } - /// If we have (max_dynamic_types - 1) number of variants and don't have String variant, we can add only String variant. - if (variant_info.variant_names.size() == max_dynamic_types - 1 && new_variant->getName() != "String" && !variant_info.variant_name_to_discriminator.contains("String")) - return false; - const DataTypes & current_variants = assert_cast(*variant_info.variant_type).getVariants(); DataTypes all_variants = current_variants; all_variants.push_back(new_variant); @@ -86,21 +133,15 @@ bool ColumnDynamic::addNewVariant(const DB::DataTypePtr & new_variant) return true; } -void ColumnDynamic::addStringVariant() +void extendVariantColumn( + IColumn & variant_column, + const DataTypePtr & old_variant_type, + const DataTypePtr & new_variant_type, + std::unordered_map old_variant_name_to_discriminator) { - if (!addNewVariant(std::make_shared())) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot add String variant to Dynamic column, it's a bug"); -} - -void ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DB::DataTypePtr & new_variant_type) -{ - const DataTypes & current_variants = assert_cast(variant_info.variant_type.get())->getVariants(); + const DataTypes & current_variants = assert_cast(old_variant_type.get())->getVariants(); const DataTypes & new_variants = assert_cast(new_variant_type.get())->getVariants(); - Names new_variant_names; - new_variant_names.reserve(new_variants.size()); - std::unordered_map new_variant_name_to_discriminator; - new_variant_name_to_discriminator.reserve(new_variants.size()); std::vector> new_variant_columns_and_discriminators_to_add; new_variant_columns_and_discriminators_to_add.reserve(new_variants.size() - current_variants.size()); std::vector current_to_new_discriminators; @@ -108,26 +149,26 @@ void ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DB::DataTypePt for (ColumnVariant::Discriminator discr = 0; discr != new_variants.size(); ++discr) { - const auto & name = new_variant_names.emplace_back(new_variants[discr]->getName()); - new_variant_name_to_discriminator[name] = discr; - - auto current_it = variant_info.variant_name_to_discriminator.find(name); - if (current_it == variant_info.variant_name_to_discriminator.end()) + auto current_it = old_variant_name_to_discriminator.find(new_variants[discr]->getName()); + if (current_it == old_variant_name_to_discriminator.end()) new_variant_columns_and_discriminators_to_add.emplace_back(new_variants[discr]->createColumn(), discr); else current_to_new_discriminators[current_it->second] = discr; } - variant_info.variant_type = new_variant_type; - variant_info.variant_name = new_variant_type->getName(); - variant_info.variant_names = new_variant_names; - variant_info.variant_name_to_discriminator = new_variant_name_to_discriminator; - assert_cast(*variant_column).extend(current_to_new_discriminators, std::move(new_variant_columns_and_discriminators_to_add)); + assert_cast(variant_column).extend(current_to_new_discriminators, std::move(new_variant_columns_and_discriminators_to_add)); +} + +void 
ColumnDynamic::updateVariantInfoAndExpandVariantColumn(const DataTypePtr & new_variant_type) +{ + extendVariantColumn(*variant_column, variant_info.variant_type, new_variant_type, variant_info.variant_name_to_discriminator); + createVariantInfo(new_variant_type); + /// Clear mappings cache because now with new Variant we will have new mappings. variant_mappings_cache.clear(); } -std::vector * ColumnDynamic::combineVariants(const DB::ColumnDynamic::VariantInfo & other_variant_info) +std::vector * ColumnDynamic::combineVariants(const ColumnDynamic::VariantInfo & other_variant_info) { /// Check if we already have global discriminators mapping for other Variant in cache. /// It's used to not calculate the same mapping each call of insertFrom with the same columns. @@ -154,20 +195,13 @@ std::vector * ColumnDynamic::combineVariants(const const DataTypes & current_variants = assert_cast(*variant_info.variant_type).getVariants(); /// We cannot combine Variants if total number of variants exceeds max_dynamic_types. - if (current_variants.size() + num_new_variants > max_dynamic_types) + if (!canAddNewVariants(num_new_variants)) { /// Remember that we cannot combine our variant with this one, so we will not try to do it again. variants_with_failed_combination.insert(other_variant_info.variant_name); return nullptr; } - /// We cannot combine Variants if total number of variants reaches max_dynamic_types and we don't have String variant. - if (current_variants.size() + num_new_variants == max_dynamic_types && !variant_info.variant_name_to_discriminator.contains("String") && !other_variant_info.variant_name_to_discriminator.contains("String")) - { - variants_with_failed_combination.insert(other_variant_info.variant_name); - return nullptr; - } - DataTypes all_variants = current_variants; all_variants.insert(all_variants.end(), other_variants.begin(), other_variants.end()); auto new_variant_type = std::make_shared(all_variants); @@ -185,40 +219,93 @@ std::vector * ColumnDynamic::combineVariants(const return &it->second; } -void ColumnDynamic::insert(const DB::Field & x) +void ColumnDynamic::insert(const Field & x) { - /// Check if we can insert field without Variant extension. - if (variant_column->tryInsert(x)) + if (x.isNull()) + { + insertDefault(); return; + } + + auto & variant_col = getVariantColumn(); + auto shared_variant_discr = getSharedVariantDiscriminator(); + /// Check if we can insert field into existing variants and avoid Variant extension. + for (size_t i = 0; i != variant_col.getNumVariants(); ++i) + { + if (i != shared_variant_discr && variant_col.getVariantByGlobalDiscriminator(i).tryInsert(x)) + { + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(i)); + variant_col.getOffsets().push_back(variant_col.getVariantByGlobalDiscriminator(i).size() - 1); + return; + } + } /// If we cannot insert field into current variant column, extend it with new variant for this field from its type. - if (addNewVariant(applyVisitor(FieldToDataType(), x))) + auto field_data_type = applyVisitor(FieldToDataType(), x); + auto field_data_type_name = field_data_type->getName(); + if (addNewVariant(field_data_type, field_data_type_name)) { - /// Now we should be able to insert this field into extended variant column. - variant_column->insert(x); + /// Insert this field into newly added variant. 
+ auto discr = variant_info.variant_name_to_discriminator[field_data_type_name]; + variant_col.getVariantByGlobalDiscriminator(discr).insert(x); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(discr)); + variant_col.getOffsets().push_back(variant_col.getVariantByGlobalDiscriminator(discr).size() - 1); } else { /// We reached maximum number of variants and couldn't add new variant. - /// This case should be really rare in real use cases. - /// We should always be able to add String variant and cast inserted value to String. - addStringVariant(); - variant_column->insert(toString(x)); + /// In this case we add the value of this new variant into the special shared variant. + /// We store values in the shared variant in binary form together with their binary-encoded data type. + auto & shared_variant = getSharedVariant(); + auto & chars = shared_variant.getChars(); + WriteBufferFromVector value_buf(chars, AppendModeTag()); + encodeDataType(field_data_type, value_buf); + getVariantSerialization(field_data_type, field_data_type_name)->serializeBinary(x, value_buf, getFormatSettings()); + value_buf.finalize(); + chars.push_back(0); + shared_variant.getOffsets().push_back(chars.size()); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(shared_variant_discr)); + variant_col.getOffsets().push_back(shared_variant.size() - 1); } } -bool ColumnDynamic::tryInsert(const DB::Field & x) +bool ColumnDynamic::tryInsert(const Field & x) { /// We can insert any value into Dynamic column. insert(x); return true; } +Field ColumnDynamic::operator[](size_t n) const +{ + Field res; + get(n, res); + return res; +} + +void ColumnDynamic::get(size_t n, Field & res) const +{ + const auto & variant_col = getVariantColumn(); + /// Check if the value is not in the shared variant. + if (variant_col.globalDiscriminatorAt(n) != getSharedVariantDiscriminator()) + { + variant_col.get(n, res); + return; + } + + /// We should deserialize the value from the shared variant. + const auto & shared_variant = getSharedVariant(); + auto value_data = shared_variant.getDataAt(variant_col.offsetAt(n)); + ReadBufferFromMemory buf(value_data.data, value_data.size); + auto type = decodeDataType(buf); + getVariantSerialization(type)->deserializeBinary(res, buf, getFormatSettings()); +} + #if !defined(DEBUG_OR_SANITIZER_BUILD) -void ColumnDynamic::insertFrom(const DB::IColumn & src_, size_t n) +void ColumnDynamic::insertFrom(const IColumn & src_, size_t n) #else -void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) +void ColumnDynamic::doInsertFrom(const IColumn & src_, size_t n) #endif { const auto & dynamic_src = assert_cast(src_); @@ -231,6 +318,28 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) } auto & variant_col = assert_cast(*variant_column); + const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); + auto src_offset = src_variant_col.offsetAt(n); + + /// Check if we are inserting from the shared variant and process it separately. + if (src_global_discr == dynamic_src.getSharedVariantDiscriminator()) + { + const auto & src_shared_variant = dynamic_src.getSharedVariant(); + auto value = src_shared_variant.getDataAt(src_offset); + /// Decode the data type of this value. + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have this variant and deserialize the value into it from the shared variant data.
+ if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + variant_col.deserializeBinaryIntoVariant(it->second, getVariantSerialization(type, type_name), buf, getFormatSettings()); + /// Otherwise just insert it into our shared variant. + else + variant_col.insertIntoVariantFrom(getSharedVariantDiscriminator(), src_shared_variant, src_offset); + + return; + } /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) @@ -241,8 +350,6 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) /// We cannot combine 2 Variant types as total number of variants exceeds the limit. /// We need to insert single value, try to add only corresponding variant. - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); - auto src_global_discr = src_variant_col.globalDiscriminatorAt(n); /// NULL doesn't require Variant extension. if (src_global_discr == ColumnVariant::NULL_DISCRIMINATOR) @@ -260,19 +367,18 @@ void ColumnDynamic::doInsertFrom(const DB::IColumn & src_, size_t n) } /// We reached maximum number of variants and couldn't add new variant. - /// We should always be able to add String variant and cast inserted value to String. - addStringVariant(); - auto tmp_variant_column = src_variant_col.getVariantByGlobalDiscriminator(src_global_discr).cloneEmpty(); - tmp_variant_column->insertFrom(src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(n)); - auto tmp_string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - auto string_variant_discr = variant_info.variant_name_to_discriminator["String"]; - variant_col.insertIntoVariantFrom(string_variant_discr, *tmp_string_column, 0); + /// Insert this value into shared variant. + insertValueIntoSharedVariant( + src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), + variant_type, + dynamic_src.variant_info.variant_names[src_global_discr], + src_offset); } #if !defined(DEBUG_OR_SANITIZER_BUILD) -void ColumnDynamic::insertRangeFrom(const DB::IColumn & src_, size_t start, size_t length) +void ColumnDynamic::insertRangeFrom(const IColumn & src_, size_t start, size_t length) #else -void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, size_t length) +void ColumnDynamic::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) #endif { if (start + length > src_.size()) @@ -293,156 +399,206 @@ void ColumnDynamic::doInsertRangeFrom(const DB::IColumn & src_, size_t start, si /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) { - variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping); + size_t prev_size = variant_col.size(); + auto shared_variant_discr = getSharedVariantDiscriminator(); + variant_col.insertRangeFrom(*dynamic_src.variant_column, start, length, *global_discriminators_mapping, shared_variant_discr); + + /// We should process insertion from src shared variant separately, because it can contain + /// values that should be extracted into our variants. insertRangeFrom above didn't insert + /// values into our shared variant (we specified shared_variant_discr as special skip discriminator). 
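For reference, a sketch of what a single shared-variant entry looks like; this is the layout produced by the write path in insert() above (assuming the usual ColumnString chars/offsets layout), and it is what decodeDataType() / deserializeBinary() consume in the loop below:

    /// chars:   [ binary-encoded DataType ][ value bytes from serializeBinary ] '\0'
    /// offsets: one entry per row, pointing just past the terminating zero byte
    ///
    /// getDataAt(row) therefore returns exactly "encoded type + serialized value",
    /// which is first decoded with decodeDataType() and then deserialized either into
    /// a matching regular variant or re-appended to this column's own shared variant.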
+ + /// Check if src shared variant is empty, nothing to do in this case. + if (dynamic_src.getSharedVariant().empty()) + return; + + /// Iterate over src discriminators and process insertion from src shared variant. + const auto & src_variant_column = dynamic_src.getVariantColumn(); + const auto src_shared_variant_discr = dynamic_src.getSharedVariantDiscriminator(); + const auto src_shared_variant_local_discr = src_variant_column.localDiscriminatorByGlobal(src_shared_variant_discr); + const auto & src_local_discriminators = src_variant_column.getLocalDiscriminators(); + const auto & src_offsets = src_variant_column.getOffsets(); + const auto & src_shared_variant = assert_cast(src_variant_column.getVariantByLocalDiscriminator(src_shared_variant_local_discr)); + + auto & local_discriminators = variant_col.getLocalDiscriminators(); + auto & offsets = variant_col.getOffsets(); + const auto shared_variant_local_discr = variant_col.localDiscriminatorByGlobal(shared_variant_discr); + auto & shared_variant = assert_cast(variant_col.getVariantByLocalDiscriminator(shared_variant_local_discr)); + for (size_t i = 0; i != length; ++i) + { + if (src_local_discriminators[start + i] == src_shared_variant_local_discr) + { + chassert(local_discriminators[prev_size + i] == shared_variant_local_discr); + auto value = src_shared_variant.getDataAt(src_offsets[start + i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have variant with this type. In this case we should extract + /// the value from src shared variant and insert it into this variant. + if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + { + auto local_discr = variant_col.localDiscriminatorByGlobal(it->second); + auto & variant = variant_col.getVariantByLocalDiscriminator(local_discr); + getVariantSerialization(type, type_name)->deserializeBinary(variant, buf, getFormatSettings()); + /// Local discriminators were already filled in ColumnVariant::insertRangeFrom and this row should contain + /// shared_variant_local_discr. Change it to local discriminator of the found variant and update offsets. + local_discriminators[prev_size + i] = local_discr; + offsets[prev_size + i] = variant.size() - 1; + } + /// Otherwise, insert this value into shared variant. + else + { + shared_variant.insertData(value.data, value.size); + /// Update variant offset. + offsets[prev_size + i] = shared_variant.size() - 1; + } + } + } + return; } /// We cannot combine 2 Variant types as total number of variants exceeds the limit. - /// In this case we will add most frequent variants from this range and insert them as usual, - /// all other variants will be converted to String. - /// TODO: instead of keeping all current variants and just adding new most frequent variants - /// from source columns we can also try to replace rarest existing variants with frequent - /// variants from source column (so we will avoid casting new frequent variants to String - /// and keeping rare existing ones). It will require rewriting of existing data in Variant - /// column but will improve usability of Dynamic column for example during squashing blocks - /// during insert. - - const auto & src_variant_column = dynamic_src.getVariantColumn(); - - /// Calculate ranges for each variant in current range. 
- std::vector> variants_ranges(dynamic_src.variant_info.variant_names.size(), {0, 0}); - /// If we insert the whole column, no need to iterate through the range, we can just take variant sizes. - if (start == 0 && length == dynamic_src.size()) - { - for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) - variants_ranges[i] = {0, src_variant_column.getVariantByGlobalDiscriminator(i).size()}; - } - /// Otherwise we need to iterate through discriminators and calculate the range for each variant. - else - { - const auto & local_discriminators = src_variant_column.getLocalDiscriminators(); - const auto & offsets = src_variant_column.getOffsets(); - size_t end = start + length; - for (size_t i = start; i != end; ++i) - { - auto discr = src_variant_column.globalDiscriminatorByLocal(local_discriminators[i]); - if (discr != ColumnVariant::NULL_DISCRIMINATOR) - { - if (!variants_ranges[discr].second) - variants_ranges[discr].first = offsets[i]; - ++variants_ranges[discr].second; - } - } - } - + /// In this case we will add most frequent variants and insert them as usual, + /// all other variants will be inserted into shared variant. const auto & src_variants = assert_cast(*dynamic_src.variant_info.variant_type).getVariants(); - /// List of variants that will be converted to String. - std::vector variants_to_convert_to_string; /// Mapping from global discriminators of src_variant to the new variant we will create. std::vector other_to_new_discriminators; other_to_new_discriminators.reserve(dynamic_src.variant_info.variant_names.size()); - /// Check if we cannot add any more new variants. In this case we will convert all new variants to String. - if (variant_info.variant_names.size() == max_dynamic_types || (variant_info.variant_names.size() == max_dynamic_types - 1 && !variant_info.variant_name_to_discriminator.contains("String"))) + /// Check if we cannot add any more new variants. In this case we will insert all new variants into shared variant. + if (!canAddNewVariant()) { - addStringVariant(); - for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) - { - auto it = variant_info.variant_name_to_discriminator.find(dynamic_src.variant_info.variant_names[i]); - if (it == variant_info.variant_name_to_discriminator.end()) - { - variants_to_convert_to_string.push_back(i); - other_to_new_discriminators.push_back(variant_info.variant_name_to_discriminator["String"]); - } - else - { - other_to_new_discriminators.push_back(it->second); - } - } - } - /// We still can add some new variants, but not all of them. Let's choose the most frequent variants in specified range. 
- else - { - std::vector> new_variants_with_sizes; - new_variants_with_sizes.reserve(dynamic_src.variant_info.variant_names.size()); - for (size_t i = 0; i != dynamic_src.variant_info.variant_names.size(); ++i) - { - const auto & variant_name = dynamic_src.variant_info.variant_names[i]; - if (variant_name != "String" && !variant_info.variant_name_to_discriminator.contains(variant_name)) - new_variants_with_sizes.emplace_back(variants_ranges[i].second, i); - } - - std::sort(new_variants_with_sizes.begin(), new_variants_with_sizes.end(), std::greater()); - DataTypes new_variants = assert_cast(*variant_info.variant_type).getVariants(); - if (!variant_info.variant_name_to_discriminator.contains("String")) - new_variants.push_back(std::make_shared()); - - for (const auto & [_, discr] : new_variants_with_sizes) - { - if (new_variants.size() != max_dynamic_types) - new_variants.push_back(src_variants[discr]); - else - variants_to_convert_to_string.push_back(discr); - } - - auto new_variant_type = std::make_shared(new_variants); - updateVariantInfoAndExpandVariantColumn(new_variant_type); - auto string_variant_discriminator = variant_info.variant_name_to_discriminator.at("String"); + auto shared_variant_discr = getSharedVariantDiscriminator(); for (const auto & variant_name : dynamic_src.variant_info.variant_names) { auto it = variant_info.variant_name_to_discriminator.find(variant_name); if (it == variant_info.variant_name_to_discriminator.end()) - other_to_new_discriminators.push_back(string_variant_discriminator); + other_to_new_discriminators.push_back(shared_variant_discr); + else + other_to_new_discriminators.push_back(it->second); + } + } + /// We still can add some new variants, but not all of them. Let's choose the most frequent variants. + else + { + /// Create list of pairs and sort it. + std::vector> new_variants_with_sizes; + new_variants_with_sizes.reserve(dynamic_src.variant_info.variant_names.size()); + const auto & src_variant_column = dynamic_src.getVariantColumn(); + for (const auto & [name, discr] : dynamic_src.variant_info.variant_name_to_discriminator) + { + if (!variant_info.variant_name_to_discriminator.contains(name)) + new_variants_with_sizes.emplace_back(src_variant_column.getVariantByGlobalDiscriminator(discr).size(), discr); + } + + std::sort(new_variants_with_sizes.begin(), new_variants_with_sizes.end(), std::greater()); + DataTypes new_variants = assert_cast(*variant_info.variant_type).getVariants(); + /// Add new variants from sorted list until we reach max_dynamic_types. + for (const auto & [_, discr] : new_variants_with_sizes) + { + if (!canAddNewVariant(new_variants.size())) + break; + new_variants.push_back(src_variants[discr]); + } + + auto new_variant_type = std::make_shared(new_variants); + updateVariantInfoAndExpandVariantColumn(new_variant_type); + auto shared_variant_discr = getSharedVariantDiscriminator(); + for (const auto & variant_name : dynamic_src.variant_info.variant_names) + { + auto it = variant_info.variant_name_to_discriminator.find(variant_name); + if (it == variant_info.variant_name_to_discriminator.end()) + other_to_new_discriminators.push_back(shared_variant_discr); else other_to_new_discriminators.push_back(it->second); } } - /// Convert to String all variants that couldn't be added. 
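The "most frequent variants" selection above boils down to: count rows per candidate type, sort descending, take types until the capacity is reached, and route everything else to the shared variant. A self-contained sketch of that selection, assuming a fixed capacity and illustrative names:

```cpp
#include <algorithm>
#include <cstddef>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

/// Pick up to `capacity` types overall, preferring the most frequent candidates
/// that are not already present; everything not chosen goes to the shared variant.
std::unordered_set<std::string> pickMostFrequent(
    const std::vector<std::pair<std::string, size_t>> & candidate_sizes,
    const std::unordered_set<std::string> & existing,
    size_t capacity)
{
    std::vector<std::pair<size_t, std::string>> sorted;
    for (const auto & [name, size] : candidate_sizes)
        if (!existing.count(name))
            sorted.emplace_back(size, name);

    std::sort(sorted.begin(), sorted.end(), std::greater<>());

    std::unordered_set<std::string> chosen = existing;
    for (const auto & [size, name] : sorted)
    {
        if (chosen.size() >= capacity)
            break;
        chosen.insert(name);
    }
    return chosen;
}
```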
- std::unordered_map variants_converted_to_string; - variants_converted_to_string.reserve(variants_to_convert_to_string.size()); - for (auto discr : variants_to_convert_to_string) - { - auto [variant_start, variant_length] = variants_ranges[discr]; - const auto & variant = src_variant_column.getVariantPtrByGlobalDiscriminator(discr); - if (variant_start == 0 && variant_length == variant->size()) - variants_converted_to_string[discr] = castColumn(ColumnWithTypeAndName(variant, src_variants[discr], ""), std::make_shared()); - else - variants_converted_to_string[discr] = castColumn(ColumnWithTypeAndName(variant->cut(variant_start, variant_length), src_variants[discr], ""), std::make_shared()); - } - + /// Iterate over the range and perform insertion. + const auto & src_variant_column = dynamic_src.getVariantColumn(); const auto & src_local_discriminators = src_variant_column.getLocalDiscriminators(); const auto & src_offsets = src_variant_column.getOffsets(); const auto & src_variant_columns = src_variant_column.getVariants(); + const auto src_shared_variant_discr = dynamic_src.getSharedVariantDiscriminator(); + const auto src_shared_variant_local_discr = src_variant_column.localDiscriminatorByGlobal(src_shared_variant_discr); + const auto & src_shared_variant = assert_cast(*src_variant_columns[src_shared_variant_local_discr]); + auto & local_discriminators = variant_col.getLocalDiscriminators(); + local_discriminators.reserve(local_discriminators.size() + length); + auto & offsets = variant_col.getOffsets(); + offsets.reserve(offsets.size() + length); + auto & variant_columns = variant_col.getVariants(); + const auto shared_variant_discr = getSharedVariantDiscriminator(); + const auto shared_variant_local_discr = variant_col.localDiscriminatorByGlobal(shared_variant_discr); + auto & shared_variant = assert_cast(*variant_columns[shared_variant_local_discr]); size_t end = start + length; for (size_t i = start; i != end; ++i) { - auto local_discr = src_local_discriminators[i]; - if (local_discr == ColumnVariant::NULL_DISCRIMINATOR) + auto src_local_discr = src_local_discriminators[i]; + auto src_offset = src_offsets[i]; + if (src_local_discr == ColumnVariant::NULL_DISCRIMINATOR) { - variant_col.insertDefault(); + local_discriminators.push_back(ColumnVariant::NULL_DISCRIMINATOR); + offsets.emplace_back(); } else { - auto global_discr = src_variant_column.globalDiscriminatorByLocal(local_discr); - auto to_global_discr = other_to_new_discriminators[global_discr]; - auto it = variants_converted_to_string.find(global_discr); - if (it == variants_converted_to_string.end()) + /// Process insertion from src shared variant separately. + if (src_local_discr == src_shared_variant_local_discr) { - variant_col.insertIntoVariantFrom(to_global_discr, *src_variant_columns[local_discr], src_offsets[i]); + auto value = src_shared_variant.getDataAt(src_offset); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have variant with this type. In this case we should extract + /// the value from src shared variant and insert it into this variant. 
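The row loop below maintains the ColumnVariant invariant by hand: every appended row pushes one local discriminator and one offset, and after a value is appended to a nested variant the offset must be that variant's new size minus one; NULL rows get the NULL discriminator and a default offset. A minimal model of that bookkeeping, with made-up names:

```cpp
#include <cstdint>
#include <optional>
#include <string>
#include <vector>

static constexpr uint8_t NULL_DISCR = 254; // stand-in for ColumnVariant::NULL_DISCRIMINATOR

struct MiniVariantColumn
{
    std::vector<std::vector<std::string>> variants; // one value vector per variant
    std::vector<uint8_t> local_discriminators;      // one entry per row
    std::vector<size_t> offsets;                    // row -> index inside the chosen variant

    void appendRow(std::optional<uint8_t> discr, const std::string & value)
    {
        if (!discr)
        {
            local_discriminators.push_back(NULL_DISCR);
            offsets.push_back(0); // offset is unused for NULL rows
            return;
        }
        auto & variant = variants[*discr];
        variant.push_back(value);
        local_discriminators.push_back(*discr);
        offsets.push_back(variant.size() - 1); // invariant: points at the appended value
    }
};
```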
+ if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + { + auto local_discr = variant_col.localDiscriminatorByGlobal(it->second); + getVariantSerialization(type, type_name)->deserializeBinary(*variant_columns[local_discr], buf, getFormatSettings()); + local_discriminators.push_back(local_discr); + offsets.push_back(variant_columns[local_discr]->size() - 1); + } + /// Otherwise, insert this value into shared variant. + else + { + shared_variant.insertData(value.data, value.size); + local_discriminators.push_back(shared_variant_local_discr); + offsets.push_back(shared_variant.size() - 1); + } } + /// Insertion from usual variant. else { - variant_col.insertIntoVariantFrom(to_global_discr, *it->second, src_offsets[i] - variants_ranges[global_discr].first); + auto src_global_discr = src_variant_column.globalDiscriminatorByLocal(src_local_discr); + auto global_discr = other_to_new_discriminators[src_global_discr]; + /// Check if we need to insert this value into shared variant. + if (global_discr == shared_variant_discr) + { + serializeValueIntoSharedVariant( + shared_variant, + *src_variant_columns[src_local_discr], + src_variants[src_global_discr], + getVariantSerialization(src_variants[src_global_discr], dynamic_src.variant_info.variant_names[src_global_discr]), + src_offset); + local_discriminators.push_back(shared_variant_local_discr); + offsets.push_back(shared_variant.size() - 1); + } + else + { + auto local_discr = variant_col.localDiscriminatorByGlobal(global_discr); + variant_columns[local_discr]->insertFrom(*src_variant_columns[src_local_discr], src_offset); + local_discriminators.push_back(local_discr); + offsets.push_back(variant_columns[local_discr]->size() - 1); + } } } } } #if !defined(DEBUG_OR_SANITIZER_BUILD) -void ColumnDynamic::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length) +void ColumnDynamic::insertManyFrom(const IColumn & src_, size_t position, size_t length) #else -void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, size_t length) +void ColumnDynamic::doInsertManyFrom(const IColumn & src_, size_t position, size_t length) #endif { const auto & dynamic_src = assert_cast(src_); @@ -455,6 +611,36 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, } auto & variant_col = assert_cast(*variant_column); + const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); + auto src_global_discr = src_variant_col.globalDiscriminatorAt(position); + auto src_offset = src_variant_col.offsetAt(position); + + /// Check if we insert from shared variant and process it separately. + if (src_global_discr == dynamic_src.getSharedVariantDiscriminator()) + { + const auto & src_shared_variant = dynamic_src.getSharedVariant(); + auto value = src_shared_variant.getDataAt(src_offset); + /// Decode data type of this value. + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + /// Check if we have this variant and deserialize value into variant from shared variant data. + if (auto it = variant_info.variant_name_to_discriminator.find(type_name); it != variant_info.variant_name_to_discriminator.end()) + { + /// Deserialize value into temporary column and use it in insertManyIntoVariantFrom. 
+ auto tmp_column = type->createColumn(); + tmp_column->reserve(1); + getVariantSerialization(type, type_name)->deserializeBinary(*tmp_column, buf, getFormatSettings()); + variant_col.insertManyIntoVariantFrom(it->second, *tmp_column, 0, length); + } + /// Otherwise just insert it into our shared variant. + else + { + variant_col.insertManyIntoVariantFrom(getSharedVariantDiscriminator(), src_shared_variant, src_offset, length); + } + + return; + } /// If variants are different, we need to extend our variant with new variants. if (auto * global_discriminators_mapping = combineVariants(dynamic_src.variant_info)) @@ -465,8 +651,6 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, /// We cannot combine 2 Variant types as total number of variants exceeds the limit. /// We need to insert single value, try to add only corresponding variant. - const auto & src_variant_col = assert_cast(*dynamic_src.variant_column); - auto src_global_discr = src_variant_col.globalDiscriminatorAt(position); if (src_global_discr == ColumnVariant::NULL_DISCRIMINATOR) { insertDefault(); @@ -481,21 +665,51 @@ void ColumnDynamic::doInsertManyFrom(const DB::IColumn & src_, size_t position, return; } - addStringVariant(); - auto tmp_variant_column = src_variant_col.getVariantByGlobalDiscriminator(src_global_discr).cloneEmpty(); - tmp_variant_column->insertFrom(src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), src_variant_col.offsetAt(position)); - auto tmp_string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - auto string_variant_discr = variant_info.variant_name_to_discriminator["String"]; - variant_col.insertManyIntoVariantFrom(string_variant_discr, *tmp_string_column, 0, length); + /// We reached maximum number of variants and couldn't add new variant. + /// Insert this value into shared variant. + /// Create temporary string column, serialize value into it and use it in insertManyIntoVariantFrom. 
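When the same shared-variant value has to be inserted `length` times, the value is decoded once into a one-row temporary column and then replicated, instead of re-deserializing it for every row. A sketch of that "decode once, repeat many" pattern under assumed types:

```cpp
#include <cstddef>
#include <string>
#include <vector>

/// Hypothetical decoder: turns a serialized blob into one in-memory value.
std::string decodeOnce(const std::string & blob)
{
    return blob; // stand-in for real binary deserialization
}

/// Insert the same decoded value `length` times into the destination variant.
void insertMany(std::vector<std::string> & dst_variant, const std::string & blob, size_t length)
{
    std::string value = decodeOnce(blob); // decode exactly once
    dst_variant.insert(dst_variant.end(), length, value);
}
```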
+ auto tmp_shared_variant = ColumnString::create(); + serializeValueIntoSharedVariant( + *tmp_shared_variant, + src_variant_col.getVariantByGlobalDiscriminator(src_global_discr), + variant_type, + getVariantSerialization(variant_type, dynamic_src.variant_info.variant_names[src_global_discr]), + src_offset); + + variant_col.insertManyIntoVariantFrom(getSharedVariantDiscriminator(), *tmp_shared_variant, 0, length); } +void ColumnDynamic::insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n) +{ + auto & variant_col = getVariantColumn(); + auto & shared_variant = getSharedVariant(); + serializeValueIntoSharedVariant(shared_variant, src, type, getVariantSerialization(type, type_name), n); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(getSharedVariantDiscriminator())); + variant_col.getOffsets().push_back(shared_variant.size() - 1); +} -StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, const char *& begin) const +void ColumnDynamic::serializeValueIntoSharedVariant( + ColumnString & shared_variant, + const IColumn & src, + const DataTypePtr & type, + const SerializationPtr & serialization, + size_t n) +{ + auto & chars = shared_variant.getChars(); + WriteBufferFromVector value_buf(chars, AppendModeTag()); + encodeDataType(type, value_buf); + serialization->serializeBinary(src, n, value_buf, getFormatSettings()); + value_buf.finalize(); + chars.push_back(0); + shared_variant.getOffsets().push_back(chars.size()); +} + +StringRef ColumnDynamic::serializeValueIntoArena(size_t n, Arena & arena, const char *& begin) const { /// We cannot use Variant serialization here as it serializes discriminator + value, /// but Dynamic doesn't have fixed mapping discriminator <-> variant type /// as different Dynamic column can have different Variants. - /// Instead, we serialize null bit + variant type in binary format (size + bytes) + value. + /// Instead, we serialize null bit + variant type and value in binary format (size + data). const auto & variant_col = assert_cast(*variant_column); auto discr = variant_col.globalDiscriminatorAt(n); StringRef res; @@ -509,19 +723,29 @@ StringRef ColumnDynamic::serializeValueIntoArena(size_t n, DB::Arena & arena, co return res; } - const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(discr); - String variant_type_binary_data = encodeDataType(variant_type); - size_t variant_type_binary_data_size = variant_type_binary_data.size(); - char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(), begin); - memcpy(pos, &null_bit, sizeof(UInt8)); - memcpy(pos + sizeof(UInt8), &variant_type_binary_data_size, sizeof(size_t)); - memcpy(pos + sizeof(UInt8) + sizeof(size_t), variant_type_binary_data.data(), variant_type_binary_data.size()); - res.data = pos; - res.size = sizeof(UInt8) + sizeof(size_t) + variant_type_binary_data.size(); + WriteBufferFromOwnString buf; + StringRef type_and_value; + /// If we have value from shared variant, it's already stored in the desired format. + if (discr == getSharedVariantDiscriminator()) + { + type_and_value = getSharedVariant().getDataAt(variant_col.offsetAt(n)); + } + /// For regular variants serialize its type and value in binary format. 
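serializeValueIntoSharedVariant writes straight into the shared variant's character buffer: the encoded type, then the binary value, then a terminating zero byte, and finally the new end position is pushed to the offsets array, which is how ClickHouse string columns delimit values. A standalone model of that append path (illustrative names):

```cpp
#include <cstddef>
#include <string>
#include <string_view>
#include <vector>

/// Minimal model of a string column: concatenated bytes plus end offsets,
/// with every value followed by a terminating zero byte.
struct MiniStringColumn
{
    std::vector<char> chars;
    std::vector<size_t> offsets; // offsets[i] = end of value i (including the 0 byte)

    void append(std::string_view type_header, std::string_view binary_value)
    {
        chars.insert(chars.end(), type_header.begin(), type_header.end());
        chars.insert(chars.end(), binary_value.begin(), binary_value.end());
        chars.push_back(0);
        offsets.push_back(chars.size());
    }

    std::string_view at(size_t i) const
    {
        size_t begin = i == 0 ? 0 : offsets[i - 1];
        return {chars.data() + begin, offsets[i] - begin - 1}; // strip the 0 byte
    }
};
```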
+ else + { + const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(discr); + encodeDataType(variant_type, buf); + getVariantSerialization(variant_type, variant_info.variant_names[discr]) + ->serializeBinary(variant_col.getVariantByGlobalDiscriminator(discr), variant_col.offsetAt(n), buf, getFormatSettings()); + type_and_value = buf.str(); + } - auto value_ref = variant_col.getVariantByGlobalDiscriminator(discr).serializeValueIntoArena(variant_col.offsetAt(n), arena, begin); - res.data = value_ref.data - res.size; - res.size += value_ref.size; + char * pos = arena.allocContinue(sizeof(UInt8) + sizeof(size_t) + type_and_value.size, begin); + memcpy(pos, &null_bit, sizeof(UInt8)); + memcpy(pos + sizeof(UInt8), &type_and_value.size, sizeof(size_t)); + memcpy(pos + sizeof(UInt8) + sizeof(size_t), type_and_value.data, type_and_value.size); + res.data = pos; + res.size = sizeof(UInt8) + sizeof(size_t) + type_and_value.size; return res; } @@ -536,39 +760,36 @@ const char * ColumnDynamic::deserializeAndInsertFromArena(const char * pos) return pos; } - /// Read variant type in binary format. - const size_t variant_type_binary_data_size = unalignedLoad(pos); - pos += sizeof(variant_type_binary_data_size); - String variant_type_binary_data; - variant_type_binary_data.resize(variant_type_binary_data_size); - memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size); - pos += variant_type_binary_data_size; - auto variant_type = decodeDataType(variant_type_binary_data); + /// Read variant type and value in binary format. + const size_t type_and_value_size = unalignedLoad(pos); + pos += sizeof(type_and_value_size); + std::string_view type_and_value(pos, type_and_value_size); + pos += type_and_value_size; + + ReadBufferFromMemory buf(type_and_value.data(), type_and_value.size()); + auto variant_type = decodeDataType(buf); auto variant_name = variant_type->getName(); /// If we already have such variant, just deserialize it into corresponding variant column. auto it = variant_info.variant_name_to_discriminator.find(variant_name); if (it != variant_info.variant_name_to_discriminator.end()) { - auto discr = it->second; - return variant_col.deserializeVariantAndInsertFromArena(discr, pos); + variant_col.deserializeBinaryIntoVariant(it->second, getVariantSerialization(variant_type, variant_name), buf, getFormatSettings()); } - - /// If we don't have such variant, add it. - if (likely(addNewVariant(variant_type))) + /// If we don't have such variant, try to add it. + else if (likely(addNewVariant(variant_type))) { auto discr = variant_info.variant_name_to_discriminator[variant_name]; - return variant_col.deserializeVariantAndInsertFromArena(discr, pos); + variant_col.deserializeBinaryIntoVariant(discr, getVariantSerialization(variant_type, variant_name), buf, getFormatSettings()); + } + /// Otherwise insert this value into shared variant. + else + { + auto & shared_variant = getSharedVariant(); + shared_variant.insertData(type_and_value.data(), type_and_value.size()); + variant_col.getLocalDiscriminators().push_back(variant_col.localDiscriminatorByGlobal(getSharedVariantDiscriminator())); + variant_col.getOffsets().push_back(shared_variant.size() - 1); } - /// We reached maximum number of variants and couldn't add new variant. - /// We should always be able to add String variant and cast inserted value to String. - addStringVariant(); - /// Create temporary column of this variant type and deserialize value into it. 
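The arena format written above is a null flag, then the byte length of the type-and-value blob, then the blob itself; that is why skipSerializedInArena later in this diff can advance by `sizeof(UInt8) + sizeof(size_t) + size` without decoding anything. A sketch of the same layout over a plain byte buffer (illustrative, not the real Arena API):

```cpp
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

/// Append one value in the layout: [null_bit:1][blob_size:8][blob bytes].
/// NULL values store only the null bit.
void serializeValue(std::vector<char> & arena, bool is_null, const std::string & type_and_value)
{
    uint8_t null_bit = is_null ? 1 : 0;
    arena.push_back(static_cast<char>(null_bit));
    if (is_null)
        return;
    size_t size = type_and_value.size();
    const char * size_bytes = reinterpret_cast<const char *>(&size);
    arena.insert(arena.end(), size_bytes, size_bytes + sizeof(size));
    arena.insert(arena.end(), type_and_value.begin(), type_and_value.end());
}

/// Skip one serialized value without decoding it.
const char * skipValue(const char * pos)
{
    uint8_t null_bit = static_cast<uint8_t>(*pos);
    pos += 1;
    if (null_bit)
        return pos;
    size_t size;
    std::memcpy(&size, pos, sizeof(size));
    return pos + sizeof(size) + size;
}
```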
- auto tmp_variant_column = variant_type->createColumn(); - pos = tmp_variant_column->deserializeAndInsertFromArena(pos); - /// Cast temporary column to String and insert this value into String variant. - auto str_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - variant_col.insertIntoVariantFrom(variant_info.variant_name_to_discriminator["String"], *str_column, 0); return pos; } @@ -579,14 +800,10 @@ const char * ColumnDynamic::skipSerializedInArena(const char * pos) const if (null_bit) return pos; - const size_t variant_type_binary_data_size = unalignedLoad(pos); - pos += sizeof(variant_type_binary_data_size); - String variant_type_binary_data; - variant_type_binary_data.resize(variant_type_binary_data_size); - memcpy(variant_type_binary_data.data(), pos, variant_type_binary_data_size); - pos += variant_type_binary_data_size; - auto tmp_variant_column = decodeDataType(variant_type_binary_data)->createColumn(); - return tmp_variant_column->skipSerializedInArena(pos); + const size_t type_and_value_size = unalignedLoad(pos); + pos += sizeof(type_and_value_size); + pos += type_and_value_size; + return pos; } void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const @@ -604,9 +821,9 @@ void ColumnDynamic::updateHashWithValue(size_t n, SipHash & hash) const } #if !defined(DEBUG_OR_SANITIZER_BUILD) -int ColumnDynamic::compareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const +int ColumnDynamic::compareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #else -int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int nan_direction_hint) const +int ColumnDynamic::doCompareAt(size_t n, size_t m, const IColumn & rhs, int nan_direction_hint) const #endif { const auto & left_variant = assert_cast(*variant_column); @@ -614,7 +831,9 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int const auto & right_variant = assert_cast(*right_dynamic.variant_column); auto left_discr = left_variant.globalDiscriminatorAt(n); + auto left_shared_variant_discr = getSharedVariantDiscriminator(); auto right_discr = right_variant.globalDiscriminatorAt(m); + auto right_shared_variant_discr = right_dynamic.getSharedVariantDiscriminator(); /// Check if we have NULLs and return result based on nan_direction_hint. if (left_discr == ColumnVariant::NULL_DISCRIMINATOR && right_discr == ColumnVariant::NULL_DISCRIMINATOR) @@ -624,12 +843,129 @@ int ColumnDynamic::doCompareAt(size_t n, size_t m, const DB::IColumn & rhs, int else if (right_discr == ColumnVariant::NULL_DISCRIMINATOR) return -nan_direction_hint; - /// If rows have different types, we compare type names. - if (variant_info.variant_names[left_discr] != right_dynamic.variant_info.variant_names[right_discr]) - return variant_info.variant_names[left_discr] < right_dynamic.variant_info.variant_names[right_discr] ? -1 : 1; + /// Check if both values are in shared variant. + if (left_discr == left_shared_variant_discr && right_discr == right_shared_variant_discr) + { + /// First check if both type and value are equal. + auto left_value = getSharedVariant().getDataAt(left_variant.offsetAt(n)); + auto right_value = right_dynamic.getSharedVariant().getDataAt(right_variant.offsetAt(m)); + if (left_value == right_value) + return 0; - /// If rows have the same types, compare actual values from corresponding variants. 
- return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint); + /// Extract type names from both values. + ReadBufferFromMemory buf_left(left_value.data, left_value.size); + auto left_data_type = decodeDataType(buf_left); + auto left_data_type_name = left_data_type->getName(); + + ReadBufferFromMemory buf_right(right_value.data, right_value.size); + auto right_data_type = decodeDataType(buf_right); + auto right_data_type_name = right_data_type->getName(); + + /// If rows have different types, we compare type names. + if (left_data_type_name != right_data_type_name) + return left_data_type_name < right_data_type_name ? -1 : 1; + + /// If rows have the same type, we compare actual values. + /// We have both values serialized in binary format, so we need to + /// create temporary column, insert both values into it and compare. + auto tmp_column = left_data_type->createColumn(); + const auto & serialization = getVariantSerialization(left_data_type, left_data_type_name); + serialization->deserializeBinary(*tmp_column, buf_left, getFormatSettings()); + serialization->deserializeBinary(*tmp_column, buf_right, getFormatSettings()); + return tmp_column->compareAt(0, 1, *tmp_column, nan_direction_hint); + } + /// Check if only left value is in shared data. + else if (left_discr == left_shared_variant_discr) + { + /// Extract left type name from the value. + auto left_value = getSharedVariant().getDataAt(left_variant.offsetAt(n)); + ReadBufferFromMemory buf_left(left_value.data, left_value.size); + auto left_data_type = decodeDataType(buf_left); + auto left_data_type_name = left_data_type->getName(); + + /// If rows have different types, we compare type names. + if (left_data_type_name != right_dynamic.variant_info.variant_names[right_discr]) + return left_data_type_name < right_dynamic.variant_info.variant_names[right_discr] ? -1 : 1; + + /// If rows have the same type, we compare actual values. + /// We have left value serialized in binary format, we need to + /// create temporary column, insert the value into it and compare. + auto tmp_column = left_data_type->createColumn(); + getVariantSerialization(left_data_type, left_data_type_name)->deserializeBinary(*tmp_column, buf_left, getFormatSettings()); + return tmp_column->compareAt(0, right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint); + } + /// Check if only right value is in shared data. + else if (right_discr == right_shared_variant_discr) + { + /// Extract right type name from the value. + auto right_value = right_dynamic.getSharedVariant().getDataAt(right_variant.offsetAt(m)); + ReadBufferFromMemory buf_right(right_value.data, right_value.size); + auto right_data_type = decodeDataType(buf_right); + auto right_data_type_name = right_data_type->getName(); + + /// If rows have different types, we compare type names. + if (variant_info.variant_names[left_discr] != right_data_type_name) + return variant_info.variant_names[left_discr] < right_data_type_name ? -1 : 1; + + /// If rows have the same type, we compare actual values. + /// We have right value serialized in binary format, we need to + /// create temporary column, insert the value into it and compare. 
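All four branches of compareAt implement the same total order: rows of different underlying types are ordered by type name, rows of the same type are ordered by value, and a value living in the shared variant is first decoded into a temporary one-row column so it can be compared like a regular one. NULLs are handled before any of this using nan_direction_hint. A compact model of the ordering rule, with a simplified stand-in for the decoded value:

```cpp
#include <string>

/// Simplified model of one Dynamic value: its type name plus a comparable payload.
struct DynValue
{
    std::string type_name;
    double value = 0; // stand-in for the deserialized value
};

/// Compare as compareAt does: by type name first, then by value within a type.
int compareDynValues(const DynValue & left, const DynValue & right)
{
    if (left.type_name != right.type_name)
        return left.type_name < right.type_name ? -1 : 1;
    if (left.value == right.value)
        return 0;
    return left.value < right.value ? -1 : 1;
}
```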
+ auto tmp_column = right_data_type->createColumn(); + getVariantSerialization(right_data_type, right_data_type_name)->deserializeBinary(*tmp_column, buf_right, getFormatSettings()); + return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), 0, *tmp_column, nan_direction_hint); + } + /// Otherwise both values are regular variants. + else + { + /// If rows have different types, we compare type names. + if (variant_info.variant_names[left_discr] != right_dynamic.variant_info.variant_names[right_discr]) + return variant_info.variant_names[left_discr] < right_dynamic.variant_info.variant_names[right_discr] ? -1 : 1; + + /// If rows have the same types, compare actual values from corresponding variants. + return left_variant.getVariantByGlobalDiscriminator(left_discr).compareAt(left_variant.offsetAt(n), right_variant.offsetAt(m), right_variant.getVariantByGlobalDiscriminator(right_discr), nan_direction_hint); + } +} + +struct ColumnDynamic::ComparatorBase +{ + const ColumnDynamic & parent; + int nan_direction_hint; + + ComparatorBase(const ColumnDynamic & parent_, int nan_direction_hint_) + : parent(parent_), nan_direction_hint(nan_direction_hint_) + { + } + + ALWAYS_INLINE int compare(size_t lhs, size_t rhs) const + { + return parent.compareAt(lhs, rhs, parent, nan_direction_hint); + } +}; + +void ColumnDynamic::getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res) const +{ + if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Unstable) + getPermutationImpl(limit, res, ComparatorAscendingUnstable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Stable) + getPermutationImpl(limit, res, ComparatorAscendingStable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == IColumn::PermutationSortStability::Unstable) + getPermutationImpl(limit, res, ComparatorDescendingUnstable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == IColumn::PermutationSortStability::Stable) + getPermutationImpl(limit, res, ComparatorDescendingStable(*this, nan_direction_hint), DefaultSort(), DefaultPartialSort()); +} + +void ColumnDynamic::updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, DB::EqualRanges & equal_ranges) const +{ + auto comparator_equal = ComparatorEqual(*this, nan_direction_hint); + + if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Unstable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorAscendingUnstable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Ascending && stability == IColumn::PermutationSortStability::Stable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorAscendingStable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == 
IColumn::PermutationSortStability::Unstable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorDescendingUnstable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); + else if (direction == IColumn::PermutationSortDirection::Descending && stability == IColumn::PermutationSortStability::Stable) + updatePermutationImpl(limit, res, equal_ranges, ComparatorDescendingStable(*this, nan_direction_hint), comparator_equal, DefaultSort(), DefaultPartialSort()); } ColumnPtr ColumnDynamic::compress() const @@ -637,12 +973,123 @@ ColumnPtr ColumnDynamic::compress() const ColumnPtr variant_compressed = variant_column->compress(); size_t byte_size = variant_compressed->byteSize(); return ColumnCompressed::create(size(), byte_size, - [my_variant_compressed = std::move(variant_compressed), my_variant_info = variant_info, my_max_dynamic_types = max_dynamic_types, my_statistics = statistics]() mutable + [my_variant_compressed = std::move(variant_compressed), my_variant_info = variant_info, my_max_dynamic_types = max_dynamic_types, my_global_max_dynamic_types = global_max_dynamic_types, my_statistics = statistics]() mutable { - return ColumnDynamic::create(my_variant_compressed->decompress(), my_variant_info, my_max_dynamic_types, my_statistics); + return ColumnDynamic::create(my_variant_compressed->decompress(), my_variant_info, my_max_dynamic_types, my_global_max_dynamic_types, my_statistics); }); } +void ColumnDynamic::prepareForSquashing(const Columns & source_columns) +{ + if (source_columns.empty()) + return; + + /// Internal variants of source dynamic columns may differ. + /// We want to preallocate memory for all variants we will have after squashing. + /// It may happen that the total number of variants in source columns will + /// exceed the limit, in this case we will choose the most frequent variants + /// and insert the rest types into the shared variant. + + /// First, preallocate memory for variant discriminators and offsets. + size_t new_size = size(); + for (const auto & source_column : source_columns) + new_size += source_column->size(); + auto & variant_col = getVariantColumn(); + variant_col.getLocalDiscriminators().reserve_exact(new_size); + variant_col.getOffsets().reserve_exact(new_size); + + /// Second, collect all variants and their total sizes. + std::unordered_map total_variant_sizes; + DataTypes all_variants; + + auto add_variants = [&](const ColumnDynamic & source_dynamic) + { + const auto & source_variant_column = source_dynamic.getVariantColumn(); + const auto & source_variant_info = source_dynamic.getVariantInfo(); + const auto & source_variants = assert_cast(*source_variant_info.variant_type).getVariants(); + + for (size_t i = 0; i != source_variants.size(); ++i) + { + const auto & variant_name = source_variant_info.variant_names[i]; + auto it = total_variant_sizes.find(variant_name); + /// Add this variant to the list of all variants if we didn't see it yet. + if (it == total_variant_sizes.end()) + { + all_variants.push_back(source_variants[i]); + it = total_variant_sizes.emplace(variant_name, 0).first; + } + + it->second += source_variant_column.getVariantByGlobalDiscriminator(i).size(); + } + }; + + for (const auto & source_column : source_columns) + add_variants(assert_cast(*source_column)); + + /// Add variants from this dynamic column. + add_variants(*this); + + DataTypePtr result_variant_type; + /// Check if the number of all variants exceeds the limit. 
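prepareForSquashing avoids repeated reallocations while blocks are squashed: it first reserves discriminator/offset space for the combined row count, then totals the size of every variant name across all source columns so the result structure can be decided up front. A rough standalone sketch of that accounting (names are illustrative):

```cpp
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

struct SourceStats
{
    size_t rows = 0;
    std::unordered_map<std::string, size_t> variant_sizes; // variant name -> rows of that type
};

struct SquashPlan
{
    size_t total_rows = 0;                                        // reserve this many discriminators/offsets
    std::unordered_map<std::string, size_t> total_variant_sizes;  // accumulated per-variant totals
};

SquashPlan planSquashing(size_t current_rows, const std::vector<SourceStats> & sources)
{
    SquashPlan plan;
    plan.total_rows = current_rows;
    for (const auto & source : sources)
    {
        plan.total_rows += source.rows;
        for (const auto & [name, size] : source.variant_sizes)
            plan.total_variant_sizes[name] += size;
    }
    return plan;
}
```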
+ if (!canAddNewVariants(0, all_variants.size())) + { + /// We want to keep the most frequent variants in the resulting dynamic column. + DataTypes result_variants; + result_variants.reserve(max_dynamic_types + 1); /// +1 for shared variant. + /// Add variants from current variant column as we will not rewrite it. + for (const auto & variant : assert_cast(*variant_info.variant_type).getVariants()) + result_variants.push_back(variant); + + /// Create list of remaining variants with their sizes and sort it. + std::vector> variants_with_sizes; + variants_with_sizes.reserve(all_variants.size() - variant_info.variant_names.size()); + for (const auto & variant : all_variants) + { + /// Add variant to the list only of we didn't add it yet. + auto variant_name = variant->getName(); + if (!variant_info.variant_name_to_discriminator.contains(variant_name)) + variants_with_sizes.emplace_back(total_variant_sizes[variant_name], variant); + } + + std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater()); + /// Add the most frequent variants until we reach max_dynamic_types. + for (const auto & [_, new_variant] : variants_with_sizes) + { + if (!canAddNewVariant(result_variants.size())) + break; + result_variants.push_back(new_variant); + } + + result_variant_type = std::make_shared(result_variants); + } + else + { + result_variant_type = std::make_shared(all_variants); + } + + if (!result_variant_type->equals(*variant_info.variant_type)) + updateVariantInfoAndExpandVariantColumn(result_variant_type); + + /// Now current dynamic column has all resulting variants and we can call + /// prepareForSquashing on them to preallocate the memory. + for (size_t i = 0; i != variant_info.variant_names.size(); ++i) + { + Columns source_variant_columns; + source_variant_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + { + const auto & source_dynamic_column = assert_cast(*source_column); + const auto & source_variant_info = source_dynamic_column.getVariantInfo(); + /// Try to find this variant in the current source column. + auto it = source_variant_info.variant_name_to_discriminator.find(variant_info.variant_names[i]); + if (it != source_variant_info.variant_name_to_discriminator.end()) + source_variant_columns.push_back(source_dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(it->second)); + } + + variant_col.getVariantByGlobalDiscriminator(i).prepareForSquashing(source_variant_columns); + } +} + void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source_columns) { if (!empty()) @@ -663,6 +1110,9 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source /// First, collect all variants from all source columns and calculate total sizes. std::unordered_map total_sizes; DataTypes all_variants; + /// Add shared variant type in advance; + all_variants.push_back(getSharedVariantDataType()); + total_sizes[getSharedVariantTypeName()] = 0; for (const auto & source_column : source_columns) { @@ -671,7 +1121,7 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source const auto & source_variant_info = source_dynamic.getVariantInfo(); const auto & source_variants = assert_cast(*source_variant_info.variant_type).getVariants(); /// During deserialization from MergeTree we will have variant sizes statistics from the whole data part. 
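Because different source Dynamic columns may order their variants differently, the per-variant preparation step matches variants by name: for every variant of the result, it collects the nested column with the same name from each source that has it, and sources lacking that type are simply skipped. A sketch of that name-based matching with assumed data structures:

```cpp
#include <cstddef>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

/// For each result variant name, collect (source index, discriminator in that source)
/// for every source column that contains a variant with the same name.
std::vector<std::vector<std::pair<size_t, size_t>>> matchVariantsByName(
    const std::vector<std::string> & result_variant_names,
    const std::vector<std::unordered_map<std::string, size_t>> & source_name_to_discriminator)
{
    std::vector<std::vector<std::pair<size_t, size_t>>> matches(result_variant_names.size());
    for (size_t v = 0; v != result_variant_names.size(); ++v)
    {
        for (size_t s = 0; s != source_name_to_discriminator.size(); ++s)
        {
            auto it = source_name_to_discriminator[s].find(result_variant_names[v]);
            if (it != source_name_to_discriminator[s].end())
                matches[v].emplace_back(s, it->second);
        }
    }
    return matches;
}
```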
- const auto & source_statistics = source_dynamic.getStatistics(); + const auto & source_statistics = source_dynamic.getStatistics(); for (size_t i = 0; i != source_variants.size(); ++i) { const auto & variant_name = source_variant_info.variant_names[i]; @@ -682,35 +1132,67 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source all_variants.push_back(source_variants[i]); it = total_sizes.emplace(variant_name, 0).first; } - auto statistics_it = source_statistics.data.find(variant_name); - size_t size = statistics_it == source_statistics.data.end() ? source_variant_column.getVariantByGlobalDiscriminator(i).size() : statistics_it->second; + size_t size = source_variant_column.getVariantByGlobalDiscriminator(i).size(); + if (source_statistics) + { + auto statistics_it = source_statistics->variants_statistics.find(variant_name); + if (statistics_it != source_statistics->variants_statistics.end()) + size = statistics_it->second; + } + it->second += size; } + + /// Use add variants from shared variant statistics. It can help extracting + /// frequent variants from shared variant to usual variants. + if (source_statistics) + { + for (const auto & [variant_name, size] : source_statistics->shared_variants_statistics) + { + auto it = total_sizes.find(variant_name); + /// Add this variant to the list of all variants if we didn't see it yet. + if (it == total_sizes.end()) + { + all_variants.push_back(DataTypeFactory::instance().get(variant_name)); + it = total_sizes.emplace(variant_name, 0).first; + } + it->second += size; + } + } } DataTypePtr result_variant_type; - /// Check if the number of all variants exceeds the limit. - if (all_variants.size() > max_dynamic_types || (all_variants.size() == max_dynamic_types && !total_sizes.contains("String"))) + Statistics new_statistics(Statistics::Source::MERGE); + /// Reset max_dynamic_types to global_max_dynamic_types. + max_dynamic_types = global_max_dynamic_types; + /// Check if the number of all dynamic types exceeds the limit. + if (!canAddNewVariants(0, all_variants.size())) { /// Create list of variants with their sizes and sort it. std::vector> variants_with_sizes; variants_with_sizes.reserve(all_variants.size()); for (const auto & variant : all_variants) - variants_with_sizes.emplace_back(total_sizes[variant->getName()], variant); + { + if (variant->getName() != getSharedVariantTypeName()) + variants_with_sizes.emplace_back(total_sizes[variant->getName()], variant); + } std::sort(variants_with_sizes.begin(), variants_with_sizes.end(), std::greater()); - /// Take first max_dynamic_types variants from sorted list. + /// Take first max_dynamic_types variants from sorted list and fill shared_variants_statistics with the rest. DataTypes result_variants; - result_variants.reserve(max_dynamic_types); - /// Add String variant in advance. - result_variants.push_back(std::make_shared()); - for (const auto & [_, variant] : variants_with_sizes) + result_variants.reserve(max_dynamic_types + 1); /// +1 for shared variant. + /// Add shared variant. + result_variants.push_back(getSharedVariantDataType()); + for (const auto & [size, variant] : variants_with_sizes) { - if (result_variants.size() == max_dynamic_types) - break; - - if (variant->getName() != "String") + /// Add variant to the resulting variants list until we reach max_dynamic_types. + if (canAddNewVariant(result_variants.size())) result_variants.push_back(variant); + /// Add all remaining variants into shared_variants_statistics until we reach its max size. 
+ else if (new_statistics.shared_variants_statistics.size() < Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) + new_statistics.shared_variants_statistics[variant->getName()] = size; + else + break; } result_variant_type = std::make_shared(result_variants); @@ -720,26 +1202,17 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source result_variant_type = std::make_shared(all_variants); } - /// Now we have resulting Variant and can fill variant info. - variant_info.variant_type = result_variant_type; - variant_info.variant_name = result_variant_type->getName(); - const auto & result_variants = assert_cast(*result_variant_type).getVariants(); - variant_info.variant_names.clear(); - variant_info.variant_names.reserve(result_variants.size()); - variant_info.variant_name_to_discriminator.clear(); - variant_info.variant_name_to_discriminator.reserve(result_variants.size()); - statistics.data.clear(); - statistics.data.reserve(result_variants.size()); - statistics.source = Statistics::Source::MERGE; - for (size_t i = 0; i != result_variants.size(); ++i) - { - auto variant_name = result_variants[i]->getName(); - variant_info.variant_names.push_back(variant_name); - variant_info.variant_name_to_discriminator[variant_name] = i; - statistics.data[variant_name] = total_sizes[variant_name]; - } + /// Now we have resulting Variant and can fill variant info and create merge statistics. + setVariantType(result_variant_type); + new_statistics.variants_statistics.reserve(variant_info.variant_names.size()); + for (const auto & variant_name : variant_info.variant_names) + new_statistics.variants_statistics[variant_name] = total_sizes[variant_name]; + statistics = std::make_shared(std::move(new_statistics)); - variant_column = variant_info.variant_type->createColumn(); + /// Reduce max_dynamic_types to the number of selected variants, so there will be no possibility + /// to extend selected variants on inerts into this column during merges. + /// -1 because we don't count shared variant in the limit. + max_dynamic_types = variant_info.variant_names.size() - 1; /// Now we have the resulting Variant that will be used in all merged columns. /// Variants can also contain Dynamic columns inside, we should collect @@ -755,7 +1228,7 @@ void ColumnDynamic::takeDynamicStructureFromSourceColumns(const Columns & source { /// Try to find this variant in current source column. auto it = source_variant_info.variant_name_to_discriminator.find(variant_info.variant_names[i]); - if (it != source_variant_info.variant_name_to_discriminator.end()) + if (it != source_variant_info.variant_name_to_discriminator.end()) /// Add shared variant. variants_source_columns[i].push_back(source_dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(it->second)); } } diff --git a/src/Columns/ColumnDynamic.h b/src/Columns/ColumnDynamic.h index e92cabd3db9..e6e720765f6 100644 --- a/src/Columns/ColumnDynamic.h +++ b/src/Columns/ColumnDynamic.h @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -19,11 +20,19 @@ namespace DB * * When new values are inserted into Dynamic column, the internal Variant * type and column are extended if the inserted value has new type. + * When the limit on number of dynamic types is exceeded, all values + * with new types are inserted into special shared variant with type String + * that contains values and their types in binary format. */ class ColumnDynamic final : public COWHelper, ColumnDynamic> { public: - /// + /// Maximum limit on dynamic types. 
We use ColumnVariant to store all the types, + /// so the limit cannot be greater then ColumnVariant::MAX_NESTED_COLUMNS. + /// We also always have reserved variant for shared variant. + static constexpr size_t MAX_DYNAMIC_TYPES_LIMIT = ColumnVariant::MAX_NESTED_COLUMNS - 1; + static constexpr const char * SHARED_VARIANT_TYPE_NAME = "SharedVariant"; + struct Statistics { enum class Source @@ -32,12 +41,27 @@ public: MERGE, /// Statistics were calculated during merge of several MergeTree parts. }; + explicit Statistics(Source source_) : source(source_) {} + /// Source of the statistics. Source source; - /// Statistics data: (variant name) -> (total variant size in data part). - std::unordered_map data; + /// Statistics data for usual variants: (variant name) -> (total variant size in data part). + std::unordered_map variants_statistics; + /// Statistics data for variants from shared variant: (variant name) -> (total variant size in data part). + /// For shared variant we store statistics only for first 256 variants (should cover almost all cases and it's not expensive). + static constexpr const size_t MAX_SHARED_VARIANT_STATISTICS_SIZE = 256; + std::unordered_map shared_variants_statistics; }; + using StatisticsPtr = std::shared_ptr; + + struct ComparatorBase; + using ComparatorAscendingUnstable = ComparatorAscendingUnstableImpl; + using ComparatorAscendingStable = ComparatorAscendingStableImpl; + using ComparatorDescendingUnstable = ComparatorDescendingUnstableImpl; + using ComparatorDescendingStable = ComparatorDescendingStableImpl; + using ComparatorEqual = ComparatorEqualImpl; + private: friend class COWHelper, ColumnDynamic>; @@ -54,28 +78,32 @@ private: }; explicit ColumnDynamic(size_t max_dynamic_types_); - ColumnDynamic(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {}); + ColumnDynamic(MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}); + ColumnDynamic(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}); public: /** Create immutable column using immutable arguments. This arguments may be shared with other columns. * Use IColumn::mutate in order to make mutable column and mutate shared nested columns. 
*/ using Base = COWHelper, ColumnDynamic>; - static Ptr create(const ColumnPtr & variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {}) + static Ptr create(const ColumnPtr & variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) { - return ColumnDynamic::create(variant_column_->assumeMutable(), variant_info_, max_dynamic_types_, statistics_); + return ColumnDynamic::create(variant_column_->assumeMutable(), variant_info_, max_dynamic_types_, global_max_dynamic_types_, statistics_); } - static MutablePtr create(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, const Statistics & statistics_ = {}) + static MutablePtr create(MutableColumnPtr variant_column_, const VariantInfo & variant_info_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) { - return Base::create(std::move(variant_column_), variant_info_, max_dynamic_types_, statistics_); + return Base::create(std::move(variant_column_), variant_info_, max_dynamic_types_, global_max_dynamic_types_, statistics_); } - static MutablePtr create(MutableColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_ = {}); - - static ColumnPtr create(ColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, const Statistics & statistics_ = {}) + static MutablePtr create(MutableColumnPtr variant_column_, const DataTypePtr & variant_type_, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) { - return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, statistics_); + return Base::create(std::move(variant_column_), variant_type_, max_dynamic_types_, global_max_dynamic_types_, statistics_); + } + + static ColumnPtr create(ColumnPtr variant_column_, const DataTypePtr & variant_type, size_t max_dynamic_types_, size_t global_max_dynamic_types_, const StatisticsPtr & statistics_ = {}) + { + return create(variant_column_->assumeMutable(), variant_type, max_dynamic_types_, global_max_dynamic_types_, statistics_); } static MutablePtr create(size_t max_dynamic_types_) @@ -83,7 +111,7 @@ public: return Base::create(max_dynamic_types_); } - std::string getName() const override { return "Dynamic(max_types=" + std::to_string(max_dynamic_types) + ")"; } + std::string getName() const override { return "Dynamic(max_types=" + std::to_string(global_max_dynamic_types) + ")"; } const char * getFamilyName() const override { @@ -98,12 +126,12 @@ public: MutableColumnPtr cloneEmpty() const override { /// Keep current dynamic structure - return Base::create(variant_column->cloneEmpty(), variant_info, max_dynamic_types, statistics); + return Base::create(variant_column->cloneEmpty(), variant_info, max_dynamic_types, global_max_dynamic_types, statistics); } MutableColumnPtr cloneResized(size_t size) const override { - return Base::create(variant_column->cloneResized(size), variant_info, max_dynamic_types, statistics); + return Base::create(variant_column->cloneResized(size), variant_info, max_dynamic_types, global_max_dynamic_types, statistics); } size_t size() const override @@ -111,15 +139,9 @@ public: return variant_column->size(); } - Field operator[](size_t n) const override - { - return (*variant_column)[n]; - } + Field operator[](size_t n) const override; - void 
get(size_t n, Field & res) const override - { - variant_column->get(n, res); - } + void get(size_t n, Field & res) const override; bool isDefaultAt(size_t n) const override { @@ -187,7 +209,7 @@ public: ColumnPtr filter(const Filter & filt, ssize_t result_size_hint) const override { - return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types); + return create(variant_column->filter(filt, result_size_hint), variant_info, max_dynamic_types, global_max_dynamic_types); } void expand(const Filter & mask, bool inverted) override @@ -197,17 +219,17 @@ public: ColumnPtr permute(const Permutation & perm, size_t limit) const override { - return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types); + return create(variant_column->permute(perm, limit), variant_info, max_dynamic_types, global_max_dynamic_types); } ColumnPtr index(const IColumn & indexes, size_t limit) const override { - return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types); + return create(variant_column->index(indexes, limit), variant_info, max_dynamic_types, global_max_dynamic_types); } ColumnPtr replicate(const Offsets & replicate_offsets) const override { - return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types); + return create(variant_column->replicate(replicate_offsets), variant_info, max_dynamic_types, global_max_dynamic_types); } MutableColumns scatter(ColumnIndex num_columns, const Selector & selector) const override @@ -216,7 +238,7 @@ public: MutableColumns scattered_columns; scattered_columns.reserve(num_columns); for (auto & scattered_variant_column : scattered_variant_columns) - scattered_columns.emplace_back(create(std::move(scattered_variant_column), variant_info, max_dynamic_types)); + scattered_columns.emplace_back(create(std::move(scattered_variant_column), variant_info, max_dynamic_types, global_max_dynamic_types)); return scattered_columns; } @@ -238,22 +260,18 @@ public: } void getPermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, - size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override - { - variant_column->getPermutation(direction, stability, limit, nan_direction_hint, res); - } + size_t limit, int nan_direction_hint, IColumn::Permutation & res) const override; void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, - size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override - { - variant_column->updatePermutation(direction, stability, limit, nan_direction_hint, res, equal_ranges); - } + size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override { variant_column->reserve(n); } + void prepareForSquashing(const Columns & source_columns) override; + void ensureOwnership() override { variant_column->ensureOwnership(); @@ -293,7 +311,7 @@ public: bool structureEquals(const IColumn & rhs) const override { if (const auto * rhs_concrete = typeid_cast(&rhs)) - return max_dynamic_types == rhs_concrete->max_dynamic_types; + return global_max_dynamic_types == rhs_concrete->global_max_dynamic_types; return false; } @@ -336,17 +354,75 @@ public: const ColumnVariant & getVariantColumn() const { return assert_cast(*variant_column); } ColumnVariant & getVariantColumn() { return assert_cast(*variant_column); } - bool addNewVariant(const 
DataTypePtr & new_variant); - void addStringVariant(); + bool addNewVariant(const DataTypePtr & new_variant, const String & new_variant_name); + bool addNewVariant(const DataTypePtr & new_variant) { return addNewVariant(new_variant, new_variant->getName()); } bool hasDynamicStructure() const override { return true; } void takeDynamicStructureFromSourceColumns(const Columns & source_columns) override; - const Statistics & getStatistics() const { return statistics; } + const StatisticsPtr & getStatistics() const { return statistics; } + void setStatistics(const StatisticsPtr & statistics_) { statistics = statistics_; } size_t getMaxDynamicTypes() const { return max_dynamic_types; } + /// Check if we can add new variant types. + /// Shared variant doesn't count in the limit but always presents, + /// so we should subtract 1 from the total types count. + bool canAddNewVariants(size_t current_variants_count, size_t new_variants_count) const { return current_variants_count + new_variants_count - 1 <= max_dynamic_types; } + bool canAddNewVariant(size_t current_variants_count) const { return canAddNewVariants(current_variants_count, 1); } + bool canAddNewVariants(size_t new_variants_count) const { return canAddNewVariants(variant_info.variant_names.size(), new_variants_count); } + bool canAddNewVariant() const { return canAddNewVariants(variant_info.variant_names.size(), 1); } + + void setVariantType(const DataTypePtr & variant_type); + void setMaxDynamicPaths(size_t max_dynamic_type_); + + static const String & getSharedVariantTypeName() + { + static const String name = SHARED_VARIANT_TYPE_NAME; + return name; + } + + static DataTypePtr getSharedVariantDataType(); + + ColumnVariant::Discriminator getSharedVariantDiscriminator() const + { + return variant_info.variant_name_to_discriminator.at(getSharedVariantTypeName()); + } + + ColumnString & getSharedVariant() + { + return assert_cast(getVariantColumn().getVariantByGlobalDiscriminator(getSharedVariantDiscriminator())); + } + + const ColumnString & getSharedVariant() const + { + return assert_cast(getVariantColumn().getVariantByGlobalDiscriminator(getSharedVariantDiscriminator())); + } + + /// Serializes type and value in binary format into provided shared variant. Doesn't update Variant discriminators and offsets. + static void serializeValueIntoSharedVariant(ColumnString & shared_variant, const IColumn & src, const DataTypePtr & type, const SerializationPtr & serialization, size_t n); + + /// Insert value into shared variant. Also updates Variant discriminators and offsets. + void insertValueIntoSharedVariant(const IColumn & src, const DataTypePtr & type, const String & type_name, size_t n); + + const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type, const String & variant_name) const + { + /// Get serialization for provided data type. + /// To avoid calling type->getDefaultSerialization() every time we use simple cache with max size. + /// When max size is reached, just clear the cache. 
+ if (serialization_cache.size() == SERIALIZATION_CACHE_MAX_SIZE) + serialization_cache.clear(); + + if (auto it = serialization_cache.find(variant_name); it != serialization_cache.end()) + return it->second; + + return serialization_cache.emplace(variant_name, variant_type->getDefaultSerialization()).first->second; + } + + const SerializationPtr & getVariantSerialization(const DataTypePtr & variant_type) const { return getVariantSerialization(variant_type, variant_type->getName()); } private: + void createVariantInfo(const DataTypePtr & variant_type); + /// Combine current variant with the other variant and return global discriminators mapping /// from other variant to the combined one. It's used for inserting from /// different variants. @@ -359,12 +435,19 @@ private: /// Store the type of current variant with some additional information. VariantInfo variant_info; /// The maximum number of different types that can be stored in this Dynamic column. - /// If exceeded, all new variants will be converted to String. + /// If exceeded, all new variants will be added to a special shared variant with type String + /// in binary format. This limit can be different for different instances of Dynamic column. + /// When max_dynamic_types = 0, we will have only shared variant and insert all values into it. size_t max_dynamic_types; + /// The types limit specified in the data type by the user Dynamic(max_types=N). + /// max_dynamic_types in all column instances of this Dynamic type can be only smaller + /// (for example, max_dynamic_types can be reduced in takeDynamicStructureFromSourceColumns + /// before merge of different Dynamic columns). + size_t global_max_dynamic_types; /// Size statistics of each variants from MergeTree data part. /// Used in takeDynamicStructureFromSourceColumns and set during deserialization. - Statistics statistics; + StatisticsPtr statistics; /// Cache (Variant name) -> (global discriminators mapping from this variant to current variant in Dynamic column). /// Used to avoid mappings recalculation in combineVariants for the same Variant types. @@ -372,6 +455,17 @@ private: /// Cache of Variant types that couldn't be combined with current variant in Dynamic column. /// Used to avoid checking if combination is possible for the same Variant types. std::unordered_set variants_with_failed_combination; + + /// We can use serializations of different data types to serialize values into shared variant. + /// To avoid creating the same serialization multiple times, use simple cache. 
+ static const size_t SERIALIZATION_CACHE_MAX_SIZE = 256; + mutable std::unordered_map serialization_cache; }; +void extendVariantColumn( + IColumn & variant_column, + const DataTypePtr & old_variant_type, + const DataTypePtr & new_variant_type, + std::unordered_map old_variant_name_to_discriminator); + } diff --git a/src/Columns/ColumnFixedString.cpp b/src/Columns/ColumnFixedString.cpp index 0bb3f7edb14..04e894ee5ab 100644 --- a/src/Columns/ColumnFixedString.cpp +++ b/src/Columns/ColumnFixedString.cpp @@ -59,7 +59,7 @@ bool ColumnFixedString::isDefaultAt(size_t index) const void ColumnFixedString::insert(const Field & x) { - const String & s = x.get(); + const String & s = x.safeGet(); insertData(s.data(), s.size()); } @@ -67,7 +67,7 @@ bool ColumnFixedString::tryInsert(const Field & x) { if (x.getType() != Field::Types::Which::String) return false; - const String & s = x.get(); + const String & s = x.safeGet(); if (s.size() > n) return false; insertData(s.data(), s.size()); diff --git a/src/Columns/ColumnMap.cpp b/src/Columns/ColumnMap.cpp index 1025b4e77b9..651b073fef7 100644 --- a/src/Columns/ColumnMap.cpp +++ b/src/Columns/ColumnMap.cpp @@ -72,7 +72,7 @@ void ColumnMap::get(size_t n, Field & res) const size_t size = offsets[n] - offsets[n - 1]; res = Map(); - auto & map = res.get(); + auto & map = res.safeGet(); map.reserve(size); for (size_t i = 0; i < size; ++i) @@ -96,7 +96,7 @@ void ColumnMap::insertData(const char *, size_t) void ColumnMap::insert(const Field & x) { - const auto & map = x.get(); + const auto & map = x.safeGet(); nested->insert(Array(map.begin(), map.end())); } @@ -105,7 +105,7 @@ bool ColumnMap::tryInsert(const Field & x) if (x.getType() != Field::Types::Which::Map) return false; - const auto & map = x.get(); + const auto & map = x.safeGet(); return nested->tryInsert(Array(map.begin(), map.end())); } @@ -249,6 +249,15 @@ void ColumnMap::reserve(size_t n) nested->reserve(n); } +void ColumnMap::prepareForSquashing(const Columns & source_columns) +{ + Columns nested_source_columns; + nested_source_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + nested_source_columns.push_back(assert_cast(*source_column).getNestedColumnPtr()); + nested->prepareForSquashing(nested_source_columns); +} + void ColumnMap::shrinkToFit() { nested->shrinkToFit(); @@ -288,8 +297,8 @@ void ColumnMap::getExtremes(Field & min, Field & max) const /// Convert result Array fields to Map fields because client expect min and max field to have type Map - Array nested_min_value = nested_min.get(); - Array nested_max_value = nested_max.get(); + Array nested_min_value = nested_min.safeGet(); + Array nested_max_value = nested_max.safeGet(); Map map_min_value(nested_min_value.begin(), nested_min_value.end()); Map map_max_value(nested_max_value.begin(), nested_max_value.end()); diff --git a/src/Columns/ColumnMap.h b/src/Columns/ColumnMap.h index 3eaaa0ad562..191476839f1 100644 --- a/src/Columns/ColumnMap.h +++ b/src/Columns/ColumnMap.h @@ -94,6 +94,7 @@ public: void updatePermutation(IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnNullable.cpp b/src/Columns/ColumnNullable.cpp index 
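Note: the `canAddNewVariants` check added above subtracts one because the shared variant always occupies a discriminator but is excluded from the `max_dynamic_types` budget. A standalone sketch of that arithmetic (the free function and `main` are illustrative only, not part of the patch):

```cpp
#include <cassert>
#include <cstddef>

/// Mirrors the check from the hunk above: the shared variant is always present
/// but never counts towards the limit, hence the "- 1".
static bool canAddNewVariants(size_t current_variants_count, size_t new_variants_count, size_t max_dynamic_types)
{
    return current_variants_count + new_variants_count - 1 <= max_dynamic_types;
}

int main()
{
    /// Dynamic(max_types=2) currently holding Variant(Int8, SharedVariant): 2 names, 1 counted.
    assert(canAddNewVariants(2, 1, 2));   /// Float64 still fits -> Variant(Float64, Int8, SharedVariant)
    assert(!canAddNewVariants(3, 1, 2));  /// a third non-shared type does not; it goes to the shared variant
}
```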
6529f0b78db..2a25cac6461 100644 --- a/src/Columns/ColumnNullable.cpp +++ b/src/Columns/ColumnNullable.cpp @@ -706,6 +706,22 @@ void ColumnNullable::reserve(size_t n) getNullMapData().reserve(n); } +void ColumnNullable::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + Columns nested_source_columns; + nested_source_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + { + const auto & source_nullable_column = assert_cast(*source_column); + new_size += source_nullable_column.size(); + nested_source_columns.push_back(source_nullable_column.getNestedColumnPtr()); + } + + nested_column->prepareForSquashing(nested_source_columns); + getNullMapData().reserve(new_size); +} + void ColumnNullable::shrinkToFit() { getNestedColumn().shrinkToFit(); diff --git a/src/Columns/ColumnNullable.h b/src/Columns/ColumnNullable.h index fe9f5b6dcc2..2c32e0fe5a0 100644 --- a/src/Columns/ColumnNullable.h +++ b/src/Columns/ColumnNullable.h @@ -125,6 +125,7 @@ public: size_t limit, int null_direction_hint, Permutation & res, EqualRanges& equal_ranges) const override; size_t estimateCardinalityInPermutedRange(const Permutation & permutation, const EqualRange & equal_range) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnObject.cpp b/src/Columns/ColumnObject.cpp index a6431007cb6..eb99bb4081b 100644 --- a/src/Columns/ColumnObject.cpp +++ b/src/Columns/ColumnObject.cpp @@ -698,7 +698,7 @@ void ColumnObject::forEachSubcolumnRecursively(RecursiveMutableColumnCallback ca void ColumnObject::insert(const Field & field) { - const auto & object = field.get(); + const auto & object = field.safeGet(); HashSet inserted_paths; size_t old_size = size(); @@ -754,7 +754,7 @@ void ColumnObject::get(size_t n, Field & res) const { assert(n < size()); res = Object(); - auto & object = res.get(); + auto & object = res.safeGet(); for (const auto & entry : subcolumns) { diff --git a/src/Columns/ColumnString.cpp b/src/Columns/ColumnString.cpp index 7cfa2571f5a..9ed2c7e3d4d 100644 --- a/src/Columns/ColumnString.cpp +++ b/src/Columns/ColumnString.cpp @@ -557,6 +557,21 @@ void ColumnString::reserve(size_t n) offsets.reserve_exact(n); } +void ColumnString::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + size_t new_chars_size = chars.size(); + for (const auto & source_column : source_columns) + { + const auto & source_string_column = assert_cast(*source_column); + new_size += source_string_column.size(); + new_chars_size += source_string_column.chars.size(); + } + + offsets.reserve_exact(new_size); + chars.reserve_exact(new_chars_size); +} + void ColumnString::shrinkToFit() { chars.shrink_to_fit(); diff --git a/src/Columns/ColumnString.h b/src/Columns/ColumnString.h index c1012e1e55e..5bbb3ad5151 100644 --- a/src/Columns/ColumnString.h +++ b/src/Columns/ColumnString.h @@ -123,7 +123,7 @@ public: void insert(const Field & x) override { - const String & s = x.get(); + const String & s = x.safeGet(); const size_t old_size = chars.size(); const size_t size_to_append = s.size() + 1; const size_t new_size = old_size + size_to_append; @@ -283,6 +283,7 @@ public: ColumnPtr compress() const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void getExtremes(Field 
& min, Field & max) const override; diff --git a/src/Columns/ColumnTuple.cpp b/src/Columns/ColumnTuple.cpp index 4fc3f88a87c..1539d7c8c2e 100644 --- a/src/Columns/ColumnTuple.cpp +++ b/src/Columns/ColumnTuple.cpp @@ -141,7 +141,7 @@ void ColumnTuple::get(size_t n, Field & res) const const size_t tuple_size = columns.size(); res = Tuple(); - Tuple & res_tuple = res.get(); + Tuple & res_tuple = res.safeGet(); res_tuple.reserve(tuple_size); for (size_t i = 0; i < tuple_size; ++i) @@ -169,7 +169,7 @@ void ColumnTuple::insertData(const char *, size_t) void ColumnTuple::insert(const Field & x) { - const auto & tuple = x.get(); + const auto & tuple = x.safeGet(); const size_t tuple_size = columns.size(); if (tuple.size() != tuple_size) @@ -185,7 +185,7 @@ bool ColumnTuple::tryInsert(const Field & x) if (x.getType() != Field::Types::Which::Tuple) return false; - const auto & tuple = x.get(); + const auto & tuple = x.safeGet(); const size_t tuple_size = columns.size(); if (tuple.size() != tuple_size) @@ -595,6 +595,19 @@ void ColumnTuple::reserve(size_t n) getColumn(i).reserve(n); } +void ColumnTuple::prepareForSquashing(const Columns & source_columns) +{ + const size_t tuple_size = columns.size(); + for (size_t i = 0; i < tuple_size; ++i) + { + Columns nested_columns; + nested_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + nested_columns.push_back(assert_cast(*source_column).getColumnPtr(i)); + getColumn(i).prepareForSquashing(nested_columns); + } +} + void ColumnTuple::shrinkToFit() { const size_t tuple_size = columns.size(); diff --git a/src/Columns/ColumnTuple.h b/src/Columns/ColumnTuple.h index 16b47a993f6..ef396d6a130 100644 --- a/src/Columns/ColumnTuple.h +++ b/src/Columns/ColumnTuple.h @@ -110,6 +110,7 @@ public: void updatePermutationWithCollation(const Collator & collator, IColumn::PermutationSortDirection direction, IColumn::PermutationSortStability stability, size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges& equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void shrinkToFit() override; void ensureOwnership() override; size_t byteSize() const override; diff --git a/src/Columns/ColumnVariant.cpp b/src/Columns/ColumnVariant.cpp index de7efb41d19..d4294478633 100644 --- a/src/Columns/ColumnVariant.cpp +++ b/src/Columns/ColumnVariant.cpp @@ -476,7 +476,7 @@ void ColumnVariant::insertFromImpl(const DB::IColumn & src_, size_t n, const std } } -void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping) +void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, const Discriminator * skip_discriminator) { const size_t num_variants = variants.size(); const auto & src = assert_cast(src_); @@ -557,9 +557,12 @@ void ColumnVariant::insertRangeFromImpl(const DB::IColumn & src_, size_t start, Discriminator global_discr = src_global_discr; if (global_discriminators_mapping && src_global_discr != NULL_DISCRIMINATOR) global_discr = (*global_discriminators_mapping)[src_global_discr]; - Discriminator local_discr = localDiscriminatorByGlobal(global_discr); - if (nested_length) - variants[local_discr]->insertRangeFrom(*src.variants[src_local_discr], nested_start, nested_length); + if (!skip_discriminator || global_discr != *skip_discriminator) + { + Discriminator local_discr = 
localDiscriminatorByGlobal(global_discr); + if (nested_length) + variants[local_discr]->insertRangeFrom(*src.variants[src_local_discr], nested_start, nested_length); + } } } @@ -610,7 +613,7 @@ void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t l void ColumnVariant::doInsertRangeFrom(const IColumn & src_, size_t start, size_t length) #endif { - insertRangeFromImpl(src_, start, length, nullptr); + insertRangeFromImpl(src_, start, length, nullptr, nullptr); } #if !defined(DEBUG_OR_SANITIZER_BUILD) @@ -627,9 +630,9 @@ void ColumnVariant::insertFrom(const DB::IColumn & src_, size_t n, const std::ve insertFromImpl(src_, n, &global_discriminators_mapping); } -void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping) +void ColumnVariant::insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping, Discriminator skip_discriminator) { - insertRangeFromImpl(src_, start, length, &global_discriminators_mapping); + insertRangeFromImpl(src_, start, length, &global_discriminators_mapping, &skip_discriminator); } void ColumnVariant::insertManyFrom(const DB::IColumn & src_, size_t position, size_t length, const std::vector & global_discriminators_mapping) @@ -673,6 +676,14 @@ void ColumnVariant::insertManyIntoVariantFrom(DB::ColumnVariant::Discriminator g variants[local_discr]->insertManyFrom(src_, position, length); } +void ColumnVariant::deserializeBinaryIntoVariant(ColumnVariant::Discriminator global_discr, const SerializationPtr & serialization, ReadBuffer & buf, const FormatSettings & format_settings) +{ + auto local_discr = localDiscriminatorByGlobal(global_discr); + serialization->deserializeBinary(*variants[local_discr], buf, format_settings); + getLocalDiscriminators().push_back(local_discr); + getOffsets().push_back(variants[local_discr]->size() - 1); +} + void ColumnVariant::insertDefault() { getLocalDiscriminators().push_back(NULL_DISCRIMINATOR); @@ -1213,9 +1224,7 @@ struct ColumnVariant::ComparatorBase ALWAYS_INLINE int compare(size_t lhs, size_t rhs) const { - int res = parent.compareAt(lhs, rhs, parent, nan_direction_hint); - - return res; + return parent.compareAt(lhs, rhs, parent, nan_direction_hint); } }; @@ -1247,8 +1256,25 @@ void ColumnVariant::updatePermutation(IColumn::PermutationSortDirection directio void ColumnVariant::reserve(size_t n) { - local_discriminators->reserve(n); - offsets->reserve(n); + getLocalDiscriminators().reserve_exact(n); + getOffsets().reserve_exact(n); +} + +void ColumnVariant::prepareForSquashing(const Columns & source_columns) +{ + size_t new_size = size(); + for (const auto & source_column : source_columns) + new_size += source_column->size(); + reserve(new_size); + + for (size_t i = 0; i != variants.size(); ++i) + { + Columns source_variant_columns; + source_variant_columns.reserve(source_columns.size()); + for (const auto & source_column : source_columns) + source_variant_columns.push_back(assert_cast(*source_column).getVariantPtrByGlobalDiscriminator(i)); + getVariantByGlobalDiscriminator(i).prepareForSquashing(source_variant_columns); + } } void ColumnVariant::ensureOwnership() diff --git a/src/Columns/ColumnVariant.h b/src/Columns/ColumnVariant.h index 34c24b5428d..787b2f75247 100644 --- a/src/Columns/ColumnVariant.h +++ b/src/Columns/ColumnVariant.h @@ -2,6 +2,8 @@ #include #include +#include +#include namespace DB @@ -196,13 +198,15 @@ public: /// Methods for insertion from another 
Variant but with known mapping between global discriminators. void insertFrom(const IColumn & src_, size_t n, const std::vector & global_discriminators_mapping); - void insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping); + /// Don't insert data into variant with skip_discriminator global discriminator, it will be processed separately. + void insertRangeFrom(const IColumn & src_, size_t start, size_t length, const std::vector & global_discriminators_mapping, Discriminator skip_discriminator); void insertManyFrom(const IColumn & src_, size_t position, size_t length, const std::vector & global_discriminators_mapping); /// Methods for insertion into a specific variant. void insertIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t n); void insertRangeIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t start, size_t length); void insertManyIntoVariantFrom(Discriminator global_discr, const IColumn & src_, size_t position, size_t length); + void deserializeBinaryIntoVariant(Discriminator global_discr, const SerializationPtr & serialization, ReadBuffer & buf, const FormatSettings & format_settings); void insertDefault() override; void insertManyDefaults(size_t length) override; @@ -237,6 +241,7 @@ public: size_t limit, int nan_direction_hint, IColumn::Permutation & res, EqualRanges & equal_ranges) const override; void reserve(size_t n) override; + void prepareForSquashing(const Columns & source_columns) override; void ensureOwnership() override; size_t byteSize() const override; size_t byteSizeAt(size_t n) const override; @@ -263,6 +268,7 @@ public: ColumnPtr & getVariantPtrByGlobalDiscriminator(size_t discr) { return variants[global_to_local_discriminators.at(discr)]; } const NestedColumns & getVariants() const { return variants; } + NestedColumns & getVariants() { return variants; } const IColumn & getLocalDiscriminatorsColumn() const { return *local_discriminators; } IColumn & getLocalDiscriminatorsColumn() { return *local_discriminators; } @@ -302,6 +308,8 @@ public: return true; } + std::vector getLocalToGlobalDiscriminatorsMapping() const { return local_to_global_discriminators; } + /// Check if we have only 1 non-empty variant and no NULL values, /// and if so, return the discriminator of this non-empty column. 
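Note: the `global_discriminators_mapping` passed to the `insertFrom`/`insertRangeFrom` overloads above remaps each source global discriminator to its position in the combined destination Variant, while rows whose discriminator equals `skip_discriminator` are left for the caller to handle separately (in practice, insertion into the shared variant). A schematic illustration with plain vectors; the layouts and mapping values below are made up for the example:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

using Discriminator = uint8_t;

int main()
{
    /// Source column is Variant(Float64, Int8, String); the destination (after combining)
    /// is Variant(Array(Int8), Float64, Int8, String). The mapping translates source
    /// global discriminators into destination ones: Float64 0->1, Int8 1->2, String 2->3.
    std::vector<Discriminator> mapping = {1, 2, 3};

    std::vector<Discriminator> src_rows = {0, 1, 2, 1};
    std::vector<Discriminator> dst_rows;
    for (auto d : src_rows)
        dst_rows.push_back(mapping[d]);

    assert((dst_rows == std::vector<Discriminator>{1, 2, 3, 2}));
}
```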
std::optional getLocalDiscriminatorOfOneNoneEmptyVariantNoNulls() const; @@ -322,7 +330,7 @@ public: private: void insertFromImpl(const IColumn & src_, size_t n, const std::vector * global_discriminators_mapping); - void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping); + void insertRangeFromImpl(const IColumn & src_, size_t start, size_t length, const std::vector * global_discriminators_mapping, const Discriminator * skip_discriminator); void insertManyFromImpl(const IColumn & src_, size_t position, size_t length, const std::vector * global_discriminators_mapping); void initIdentityGlobalToLocalDiscriminatorsMapping(); diff --git a/src/Columns/ColumnVector.h b/src/Columns/ColumnVector.h index 2fe5b635bd2..a5e1ee4b462 100644 --- a/src/Columns/ColumnVector.h +++ b/src/Columns/ColumnVector.h @@ -85,7 +85,7 @@ public: void insertMany(const Field & field, size_t length) override { - data.resize_fill(data.size() + length, static_cast(field.get())); + data.resize_fill(data.size() + length, static_cast(field.safeGet())); } void insertData(const char * pos, size_t) override @@ -235,7 +235,7 @@ public: void insert(const Field & x) override { - data.push_back(static_cast(x.get())); + data.push_back(static_cast(x.safeGet())); } bool tryInsert(const DB::Field & x) override; diff --git a/src/Columns/IColumn.h b/src/Columns/IColumn.h index f9c1a3e7034..edcb9f0bc30 100644 --- a/src/Columns/IColumn.h +++ b/src/Columns/IColumn.h @@ -475,6 +475,15 @@ public: /// It affects performance only (not correctness). virtual void reserve(size_t /*n*/) {} + /// Reserve memory before squashing all specified source columns into this column. + virtual void prepareForSquashing(const std::vector & source_columns) + { + size_t new_size = size(); + for (const auto & source_column : source_columns) + new_size += source_column->size(); + reserve(new_size); + } + /// Requests the removal of unused capacity. /// It is a non-binding request to reduce the capacity of the underlying container to its size. 
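Note: the default `prepareForSquashing` above only reserves the summed row count, while the container columns touched in this patch (`ColumnString`, `ColumnNullable`, `ColumnTuple`, `ColumnMap`, `ColumnVariant`, `ColumnDynamic`) override it to pre-reserve their nested data as well. A minimal usage sketch, assuming a hypothetical `squashColumns` helper rather than the actual Squashing transform:

```cpp
#include <Columns/IColumn.h>

/// Hypothetical helper: pre-reserve the destination for all source blocks,
/// then append them in one pass.
DB::MutableColumnPtr squashColumns(const DB::Columns & source_columns)
{
    if (source_columns.empty())
        return nullptr;

    /// Empty column of the same type as the sources.
    DB::MutableColumnPtr result = source_columns.front()->cloneEmpty();

    /// New hook: recursively reserves nested data (chars/offsets, null map,
    /// tuple elements, variant discriminators, ...) before any insertion.
    result->prepareForSquashing(source_columns);

    for (const auto & source : source_columns)
        result->insertRangeFrom(*source, 0, source->size());

    return result;
}
```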
virtual void shrinkToFit() {} diff --git a/src/Columns/tests/gtest_column_dynamic.cpp b/src/Columns/tests/gtest_column_dynamic.cpp index a2862b09de1..de76261229d 100644 --- a/src/Columns/tests/gtest_column_dynamic.cpp +++ b/src/Columns/tests/gtest_column_dynamic.cpp @@ -7,28 +7,34 @@ using namespace DB; TEST(ColumnDynamic, CreateEmpty) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); ASSERT_TRUE(column->empty()); - ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant()"); - ASSERT_TRUE(column->getVariantInfo().variant_names.empty()); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.empty()); + ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)"); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_names[0], "SharedVariant"); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0); + ASSERT_TRUE(column->getVariantColumn().getVariantByGlobalDiscriminator(0).empty()); } TEST(ColumnDynamic, InsertDefault) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); column->insertDefault(); ASSERT_TRUE(column->size() == 1); - ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant()"); - ASSERT_TRUE(column->getVariantInfo().variant_names.empty()); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.empty()); + ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(SharedVariant)"); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_names[0], "SharedVariant"); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.size(), 1); + ASSERT_EQ(column->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0); + ASSERT_TRUE(column->getVariantColumn().getVariantByGlobalDiscriminator(0).empty()); ASSERT_TRUE(column->isNullAt(0)); ASSERT_EQ((*column)[0], Field(Null())); } TEST(ColumnDynamic, InsertFields) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); column->insert(Field(42)); column->insert(Field(-42)); column->insert(Field("str1")); @@ -41,16 +47,16 @@ TEST(ColumnDynamic, InsertFields) column->insert(Field(43.43)); ASSERT_TRUE(column->size() == 10); - ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, String)"); - std::vector expected_names = {"Float64", "Int8", "String"}; + ASSERT_EQ(column->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, SharedVariant, String)"); + std::vector expected_names = {"Float64", "Int8", "SharedVariant", "String"}; ASSERT_EQ(column->getVariantInfo().variant_names, expected_names); - std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"String", 2}}; + std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}; ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator == expected_variant_name_to_discriminator); } ColumnDynamic::MutablePtr getDynamicWithManyVariants(size_t num_variants, Field tuple_element = Field(42)) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); for (size_t i = 0; i != num_variants; ++i) { Tuple tuple; @@ -66,61 +72,71 @@ TEST(ColumnDynamic, InsertFieldsOverflow1) { auto column = 
getDynamicWithManyVariants(253); - ASSERT_EQ(column->getVariantInfo().variant_names.size(), 253); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254); column->insert(Field(42.42)); - ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column->size(), 254); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("Float64")); column->insert(Field(42)); + ASSERT_EQ(column->size(), 255); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 1); Field field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "42"); + ASSERT_EQ(field, 42); column->insert(Field(43)); + ASSERT_EQ(column->size(), 256); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 2); field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "43"); + ASSERT_EQ(field, 43); column->insert(Field("str1")); + ASSERT_EQ(column->size(), 257); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 3); field = (*column)[column->size() - 1]; ASSERT_EQ(field, "str1"); column->insert(Field(Array({Field(42), Field(43)}))); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 4); field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "[42, 43]"); + ASSERT_EQ(field, Field(Array({Field(42), Field(43)}))); } TEST(ColumnDynamic, InsertFieldsOverflow2) { auto column = getDynamicWithManyVariants(254); - ASSERT_EQ(column->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); column->insert(Field("str1")); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 1); + Field field = (*column)[column->size() - 1]; + ASSERT_EQ(field, "str1"); column->insert(Field(42)); ASSERT_EQ(column->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); - Field field = (*column)[column->size() - 1]; - ASSERT_EQ(field, "42"); + ASSERT_FALSE(column->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column->getSharedVariant().size(), 2); + field = 
(*column)[column->size() - 1]; + ASSERT_EQ(field, 42); } ColumnDynamic::MutablePtr getInsertFromColumn(size_t num = 1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); for (size_t i = 0; i != num; ++i) { column_from->insert(Field(42)); @@ -154,41 +170,41 @@ void checkInsertFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynami TEST(ColumnDynamic, InsertFrom1) { - auto column_to = ColumnDynamic::create(255); - checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + auto column_to = ColumnDynamic::create(254); + checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertFrom2) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); - checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertFrom3) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); column_to->insert(Array({Field(42)})); - checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}}); + checkInsertFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}}); } TEST(ColumnDynamic, InsertFromOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); auto column_to = getDynamicWithManyVariants(253); column_to->insertFrom(*column_from, 0); - ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); auto field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, 42); @@ -196,20 +212,22 @@ TEST(ColumnDynamic, InsertFromOverflow1) column_to->insertFrom(*column_from, 1); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); column_to->insertFrom(*column_from, 2); 
ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, "str"); } TEST(ColumnDynamic, InsertFromOverflow2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); @@ -221,9 +239,32 @@ TEST(ColumnDynamic, InsertFromOverflow2) column_to->insertFrom(*column_from, 1); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); +} + +TEST(ColumnDynamic, InsertFromOverflow3) +{ + auto column_from = ColumnDynamic::create(1); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + + auto column_to = ColumnDynamic::create(254); + column_to->insert(Field(41)); + + column_to->insertFrom(*column_from, 0); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); + auto field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42); + + column_to->insertFrom(*column_from, 1); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42.42); } void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynamic::MutablePtr & column_to, const std::string & expected_variant, const std::vector & expected_names, const std::unordered_map & expected_variant_name_to_discriminator) @@ -256,42 +297,43 @@ void checkInsertManyFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDy TEST(ColumnDynamic, InsertManyFrom1) { - auto column_to = ColumnDynamic::create(255); - checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + auto column_to = ColumnDynamic::create(254); + checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertManyFrom2) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); - checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertManyFrom3) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); 
column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str")); column_to->insert(Array({Field(42)})); - checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}}); + checkInsertManyFrom(getInsertFromColumn(), column_to, "Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}}); } TEST(ColumnDynamic, InsertManyFromOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); auto column_to = getDynamicWithManyVariants(253); column_to->insertManyFrom(*column_from, 0, 2); - ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); auto field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, 42); field = (*column_to)[column_to->size() - 1]; @@ -300,15 +342,17 @@ TEST(ColumnDynamic, InsertManyFromOverflow1) column_to->insertManyFrom(*column_from, 1, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); column_to->insertManyFrom(*column_from, 2, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, "str"); field = (*column_to)[column_to->size() - 2]; @@ -317,14 +361,15 @@ TEST(ColumnDynamic, InsertManyFromOverflow1) TEST(ColumnDynamic, InsertManyFromOverflow2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); auto column_to = getDynamicWithManyVariants(253); column_to->insertManyFrom(*column_from, 0, 2); - ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 254); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); auto field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, 42); field = (*column_to)[column_to->size() - 1]; @@ -333,11 +378,39 @@ TEST(ColumnDynamic, InsertManyFromOverflow2) column_to->insertManyFrom(*column_from, 1, 2); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); 
ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, "42.42"); + ASSERT_EQ(field, 42.42); +} + + +TEST(ColumnDynamic, InsertManyFromOverflow3) +{ + auto column_from = ColumnDynamic::create(1); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + + auto column_to = ColumnDynamic::create(254); + column_to->insert(Field(41)); + + column_to->insertManyFrom(*column_from, 0, 2); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_EQ(column_to->getSharedVariant().size(), 0); + auto field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, 42); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42); + + column_to->insertManyFrom(*column_from, 1, 2); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, 42.42); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, 42.42); } void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnDynamic::MutablePtr & column_to, const std::string & expected_variant, const std::vector & expected_names, const std::unordered_map & expected_variant_name_to_discriminator) @@ -368,34 +441,34 @@ void checkInsertRangeFrom(const ColumnDynamic::MutablePtr & column_from, ColumnD TEST(ColumnDynamic, InsertRangeFrom1) { - auto column_to = ColumnDynamic::create(255); - checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + auto column_to = ColumnDynamic::create(254); + checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertRangeFrom2) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str1")); - checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, String)", {"Float64", "Int8", "String"}, {{"Float64", 0}, {"Int8", 1}, {"String", 2}}); + checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Float64, Int8, SharedVariant, String)", {"Float64", "Int8", "SharedVariant", "String"}, {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}); } TEST(ColumnDynamic, InsertRangeFrom3) { - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); column_to->insert(Field(42)); column_to->insert(Field(42.42)); column_to->insert(Field("str1")); column_to->insert(Array({Field(42)})); - checkInsertRangeFrom(getInsertFromColumn(2), column_to, "Variant(Array(Int8), Float64, Int8, String)", {"Array(Int8)", "Float64", "Int8", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"String", 3}}); + checkInsertRangeFrom(getInsertFromColumn(2), column_to, 
"Variant(Array(Int8), Float64, Int8, SharedVariant, String)", {"Array(Int8)", "Float64", "Int8", "SharedVariant", "String"}, {{"Array(Int8)", 0}, {"Float64", 1}, {"Int8", 2}, {"SharedVariant", 3}, {"String", 4}}); } TEST(ColumnDynamic, InsertRangeFromOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -403,23 +476,25 @@ TEST(ColumnDynamic, InsertRangeFromOverflow1) auto column_to = getDynamicWithManyVariants(253); column_to->insertRangeFrom(*column_from, 0, 4); + ASSERT_EQ(column_to->size(), 257); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); auto field = (*column_to)[column_to->size() - 4]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, Field("str")); } TEST(ColumnDynamic, InsertRangeFromOverflow2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -428,19 +503,20 @@ TEST(ColumnDynamic, InsertRangeFromOverflow2) column_to->insertRangeFrom(*column_from, 0, 3); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 1); auto field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); } TEST(ColumnDynamic, InsertRangeFromOverflow3) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -449,20 +525,21 @@ TEST(ColumnDynamic, InsertRangeFromOverflow3) column_to->insert(Field("Str")); column_to->insertRangeFrom(*column_from, 0, 3); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 3); auto field = (*column_to)[column_to->size() - 3]; 
ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); } TEST(ColumnDynamic, InsertRangeFromOverflow4) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -471,19 +548,20 @@ TEST(ColumnDynamic, InsertRangeFromOverflow4) column_to->insertRangeFrom(*column_from, 0, 3); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 3); auto field = (*column_to)[column_to->size() - 3]; - ASSERT_EQ(field, Field("42")); + ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, Field("str")); } TEST(ColumnDynamic, InsertRangeFromOverflow5) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(42.42)); @@ -493,22 +571,23 @@ TEST(ColumnDynamic, InsertRangeFromOverflow5) column_to->insert(Field("str")); column_to->insertRangeFrom(*column_from, 0, 4); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_EQ(column_to->getSharedVariant().size(), 3); auto field = (*column_to)[column_to->size() - 4]; ASSERT_EQ(field, Field(42)); field = (*column_to)[column_to->size() - 3]; ASSERT_EQ(field, Field(43)); field = (*column_to)[column_to->size() - 2]; - ASSERT_EQ(field, Field("42.42")); + ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 1]; ASSERT_EQ(field, Field("str")); } TEST(ColumnDynamic, InsertRangeFromOverflow6) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(43)); column_from->insert(Field(44)); @@ -520,13 +599,14 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6) auto column_to = getDynamicWithManyVariants(253); column_to->insertRangeFrom(*column_from, 2, 5); ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 255); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); - ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + 
ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); auto field = (*column_to)[column_to->size() - 5]; - ASSERT_EQ(field, Field("44")); + ASSERT_EQ(field, Field(44)); field = (*column_to)[column_to->size() - 4]; ASSERT_EQ(field, Field(42.42)); field = (*column_to)[column_to->size() - 3]; @@ -534,12 +614,136 @@ TEST(ColumnDynamic, InsertRangeFromOverflow6) field = (*column_to)[column_to->size() - 2]; ASSERT_EQ(field, Field("str")); field = (*column_to)[column_to->size() - 1]; - ASSERT_EQ(field, Field("[42]")); + ASSERT_EQ(field, Field(Array({Field(42)}))); +} + +TEST(ColumnDynamic, InsertRangeFromOverflow7) +{ + auto column_from = ColumnDynamic::create(2); + column_from->insert(Field(42.42)); + column_from->insert(Field("str1")); + column_from->insert(Field(42)); + column_from->insert(Field(43.43)); + column_from->insert(Field(Array({Field(41)}))); + column_from->insert(Field(43)); + column_from->insert(Field("str2")); + column_from->insert(Field(Array({Field(42)}))); + + auto column_to = ColumnDynamic::create(254); + column_to->insert(Field(42)); + + column_to->insertRangeFrom(*column_from, 0, 8); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 4); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); + auto field = (*column_to)[column_to->size() - 8]; + ASSERT_EQ(field, Field(42.42)); + field = (*column_to)[column_to->size() - 7]; + ASSERT_EQ(field, Field("str1")); + field = (*column_to)[column_to->size() - 6]; + ASSERT_EQ(field, Field(42)); + field = (*column_to)[column_to->size() - 5]; + ASSERT_EQ(field, Field(43.43)); + field = (*column_to)[column_to->size() - 4]; + ASSERT_EQ(field, Field(Array({Field(41)}))); + field = (*column_to)[column_to->size() - 3]; + ASSERT_EQ(field, Field(43)); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, Field(Array({Field(42)}))); +} + +TEST(ColumnDynamic, InsertRangeFromOverflow8) +{ + auto column_from = ColumnDynamic::create(2); + column_from->insert(Field(42.42)); + column_from->insert(Field("str1")); + column_from->insert(Field(42)); + column_from->insert(Field(43.43)); + column_from->insert(Field(Array({Field(41)}))); + column_from->insert(Field(43)); + column_from->insert(Field("str2")); + column_from->insert(Field(Array({Field(42)}))); + + auto column_to = ColumnDynamic::create(2); + column_to->insert(Field(42)); + column_from->insert(Field("str1")); + + column_to->insertRangeFrom(*column_from, 0, 8); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 3); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + 
ASSERT_EQ(column_to->getSharedVariant().size(), 4); + auto field = (*column_to)[column_to->size() - 8]; + ASSERT_EQ(field, Field(42.42)); + field = (*column_to)[column_to->size() - 7]; + ASSERT_EQ(field, Field("str1")); + field = (*column_to)[column_to->size() - 6]; + ASSERT_EQ(field, Field(42)); + field = (*column_to)[column_to->size() - 5]; + ASSERT_EQ(field, Field(43.43)); + field = (*column_to)[column_to->size() - 4]; + ASSERT_EQ(field, Field(Array({Field(41)}))); + field = (*column_to)[column_to->size() - 3]; + ASSERT_EQ(field, Field(43)); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, Field(Array({Field(42)}))); +} + +TEST(ColumnDynamic, InsertRangeFromOverflow9) +{ + auto column_from = ColumnDynamic::create(3); + column_from->insert(Field("str1")); + column_from->insert(Field(42.42)); + column_from->insert(Field("str2")); + column_from->insert(Field(42)); + column_from->insert(Field(43.43)); + column_from->insert(Field(Array({Field(41)}))); + column_from->insert(Field(43)); + column_from->insert(Field("str2")); + column_from->insert(Field(Array({Field(42)}))); + + auto column_to = ColumnDynamic::create(2); + column_to->insert(Field(42)); + + column_to->insertRangeFrom(*column_from, 0, 9); + ASSERT_EQ(column_to->getVariantInfo().variant_names.size(), 3); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 4); + auto field = (*column_to)[column_to->size() - 9]; + ASSERT_EQ(field, Field("str1")); + field = (*column_to)[column_to->size() - 8]; + ASSERT_EQ(field, Field(42.42)); + field = (*column_to)[column_to->size() - 7]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 6]; + ASSERT_EQ(field, Field(42)); + field = (*column_to)[column_to->size() - 5]; + ASSERT_EQ(field, Field(43.43)); + field = (*column_to)[column_to->size() - 4]; + ASSERT_EQ(field, Field(Array({Field(41)}))); + field = (*column_to)[column_to->size() - 3]; + ASSERT_EQ(field, Field(43)); + field = (*column_to)[column_to->size() - 2]; + ASSERT_EQ(field, Field("str2")); + field = (*column_to)[column_to->size() - 1]; + ASSERT_EQ(field, Field(Array({Field(42)}))); } TEST(ColumnDynamic, SerializeDeserializeFromArena1) { - auto column = ColumnDynamic::create(255); + auto column = ColumnDynamic::create(254); column->insert(Field(42)); column->insert(Field(42.42)); column->insert(Field("str")); @@ -564,7 +768,7 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena1) TEST(ColumnDynamic, SerializeDeserializeFromArena2) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -577,26 +781,26 @@ TEST(ColumnDynamic, SerializeDeserializeFromArena2) column_from->serializeValueIntoArena(2, arena, pos); column_from->serializeValueIntoArena(3, arena, pos); - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); pos = column_to->deserializeAndInsertFromArena(ref1.data); pos = column_to->deserializeAndInsertFromArena(pos); pos = 
column_to->deserializeAndInsertFromArena(pos); column_to->deserializeAndInsertFromArena(pos); - ASSERT_EQ((*column_from)[column_from->size() - 4], 42); - ASSERT_EQ((*column_from)[column_from->size() - 3], 42.42); - ASSERT_EQ((*column_from)[column_from->size() - 2], "str"); - ASSERT_EQ((*column_from)[column_from->size() - 1], Null()); - ASSERT_EQ(column_to->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, String)"); - std::vector expected_names = {"Float64", "Int8", "String"}; + ASSERT_EQ((*column_to)[column_to->size() - 4], 42); + ASSERT_EQ((*column_to)[column_to->size() - 3], 42.42); + ASSERT_EQ((*column_to)[column_to->size() - 2], "str"); + ASSERT_EQ((*column_to)[column_to->size() - 1], Null()); + ASSERT_EQ(column_to->getVariantInfo().variant_type->getName(), "Variant(Float64, Int8, SharedVariant, String)"); + std::vector expected_names = {"Float64", "Int8", "SharedVariant", "String"}; ASSERT_EQ(column_to->getVariantInfo().variant_names, expected_names); - std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"String", 2}}; + std::unordered_map expected_variant_name_to_discriminator = {{"Float64", 0}, {"Int8", 1}, {"SharedVariant", 2}, {"String", 3}}; ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator == expected_variant_name_to_discriminator); } -TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow) +TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow1) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(254); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -615,18 +819,56 @@ TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow) pos = column_to->deserializeAndInsertFromArena(pos); column_to->deserializeAndInsertFromArena(pos); - ASSERT_EQ((*column_from)[column_from->size() - 4], 42); - ASSERT_EQ((*column_from)[column_from->size() - 3], 42.42); - ASSERT_EQ((*column_from)[column_from->size() - 2], "str"); - ASSERT_EQ((*column_from)[column_from->size() - 1], Null()); + ASSERT_EQ((*column_to)[column_to->size() - 4], 42); + ASSERT_EQ((*column_to)[column_to->size() - 3], 42.42); + ASSERT_EQ((*column_to)[column_to->size() - 2], "str"); + ASSERT_EQ((*column_to)[column_to->size() - 1], Null()); ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); +} + +TEST(ColumnDynamic, SerializeDeserializeFromArenaOverflow2) +{ + auto column_from = ColumnDynamic::create(2); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + column_from->insert(Field("str")); + column_from->insert(Field(Null())); + column_from->insert(Field(Array({Field(42)}))); + + Arena arena; + const char * pos = nullptr; + auto ref1 = column_from->serializeValueIntoArena(0, arena, pos); + column_from->serializeValueIntoArena(1, arena, pos); + column_from->serializeValueIntoArena(2, arena, pos); + column_from->serializeValueIntoArena(3, arena, pos); + column_from->serializeValueIntoArena(4, arena, pos); + + auto column_to = ColumnDynamic::create(2); + column_to->insert(Field(42.42)); + pos = column_to->deserializeAndInsertFromArena(ref1.data); + pos = 
column_to->deserializeAndInsertFromArena(pos); + pos = column_to->deserializeAndInsertFromArena(pos); + pos = column_to->deserializeAndInsertFromArena(pos); + column_to->deserializeAndInsertFromArena(pos); + + ASSERT_EQ((*column_to)[column_to->size() - 5], 42); + ASSERT_EQ((*column_to)[column_to->size() - 4], 42.42); + ASSERT_EQ((*column_to)[column_to->size() - 3], "str"); + ASSERT_EQ((*column_to)[column_to->size() - 2], Null()); + ASSERT_EQ((*column_to)[column_to->size() - 1], Field(Array({Field(42)}))); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Int8")); + ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Float64")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("String")); + ASSERT_FALSE(column_to->getVariantInfo().variant_name_to_discriminator.contains("Array(Int8)")); + ASSERT_EQ(column_to->getSharedVariant().size(), 2); } TEST(ColumnDynamic, skipSerializedInArena) { - auto column_from = ColumnDynamic::create(255); + auto column_from = ColumnDynamic::create(3); column_from->insert(Field(42)); column_from->insert(Field(42.42)); column_from->insert(Field("str")); @@ -640,13 +882,41 @@ TEST(ColumnDynamic, skipSerializedInArena) auto ref4 = column_from->serializeValueIntoArena(3, arena, pos); const char * end = ref4.data + ref4.size; - auto column_to = ColumnDynamic::create(255); + auto column_to = ColumnDynamic::create(254); pos = column_to->skipSerializedInArena(ref1.data); pos = column_to->skipSerializedInArena(pos); pos = column_to->skipSerializedInArena(pos); pos = column_to->skipSerializedInArena(pos); ASSERT_EQ(pos, end); - ASSERT_TRUE(column_to->getVariantInfo().variant_name_to_discriminator.empty()); - ASSERT_TRUE(column_to->getVariantInfo().variant_names.empty()); + ASSERT_EQ(column_to->getVariantInfo().variant_name_to_discriminator.at("SharedVariant"), 0); + ASSERT_EQ(column_to->getVariantInfo().variant_names, Names{"SharedVariant"}); +} + +TEST(ColumnDynamic, compare) +{ + auto column_from = ColumnDynamic::create(3); + column_from->insert(Field(42)); + column_from->insert(Field(42.42)); + column_from->insert(Field("str")); + column_from->insert(Field(Null())); + column_from->insert(Field(Array({Field(42)}))); + + ASSERT_EQ(column_from->compareAt(0, 0, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(0, 1, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(1, 1, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(0, 2, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(2, 0, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(2, 4, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(4, 2, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(4, 4, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(0, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(1, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(2, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(3, 3, *column_from, -1), 0); + ASSERT_EQ(column_from->compareAt(4, 3, *column_from, -1), 1); + ASSERT_EQ(column_from->compareAt(3, 0, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(3, 1, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(3, 2, *column_from, -1), -1); + ASSERT_EQ(column_from->compareAt(3, 4, *column_from, -1), -1); } diff --git a/src/Columns/tests/gtest_column_variant.cpp b/src/Columns/tests/gtest_column_variant.cpp index 25f276b9600..5e481b88409 100644 --- a/src/Columns/tests/gtest_column_variant.cpp +++ 
b/src/Columns/tests/gtest_column_variant.cpp @@ -108,10 +108,10 @@ void checkColumnVariant1(ColumnVariant * column) ASSERT_EQ(offsets[1], 0); ASSERT_EQ(offsets[3], 1); ASSERT_TRUE(column->isDefaultAt(2) && column->isDefaultAt(4)); - ASSERT_EQ((*column)[0].get(), 42); - ASSERT_EQ((*column)[1].get(), "Hello"); + ASSERT_EQ((*column)[0].safeGet(), 42); + ASSERT_EQ((*column)[1].safeGet(), "Hello"); ASSERT_TRUE((*column)[2].isNull()); - ASSERT_EQ((*column)[3].get(), "World"); + ASSERT_EQ((*column)[3].safeGet(), "World"); ASSERT_TRUE((*column)[4].isNull()); } @@ -209,9 +209,9 @@ TEST(ColumnVariant, CreateFromDiscriminatorsAndOneFullColumnNoNulls) ASSERT_EQ(offsets[0], 0); ASSERT_EQ(offsets[1], 1); ASSERT_EQ(offsets[2], 2); - ASSERT_EQ((*column)[0].get(), 0); - ASSERT_EQ((*column)[1].get(), 1); - ASSERT_EQ((*column)[2].get(), 2); + ASSERT_EQ((*column)[0].safeGet(), 0); + ASSERT_EQ((*column)[1].safeGet(), 1); + ASSERT_EQ((*column)[2].safeGet(), 2); } TEST(ColumnVariant, CreateFromDiscriminatorsAndOneFullColumnNoNullsWithLocalOrder) @@ -222,9 +222,9 @@ TEST(ColumnVariant, CreateFromDiscriminatorsAndOneFullColumnNoNullsWithLocalOrde ASSERT_EQ(offsets[0], 0); ASSERT_EQ(offsets[1], 1); ASSERT_EQ(offsets[2], 2); - ASSERT_EQ((*column)[0].get(), 0); - ASSERT_EQ((*column)[1].get(), 1); - ASSERT_EQ((*column)[2].get(), 2); + ASSERT_EQ((*column)[0].safeGet(), 0); + ASSERT_EQ((*column)[1].safeGet(), 1); + ASSERT_EQ((*column)[2].safeGet(), 2); ASSERT_EQ(column->localDiscriminatorAt(0), 2); ASSERT_EQ(column->localDiscriminatorAt(1), 2); ASSERT_EQ(column->localDiscriminatorAt(2), 2); @@ -331,9 +331,9 @@ TEST(ColumnVariant, CloneResizedGeneral1) ASSERT_EQ(offsets[0], 0); ASSERT_EQ(offsets[1], 0); ASSERT_EQ(offsets[3], 1); - ASSERT_EQ((*resized_column_variant)[0].get(), 42); - ASSERT_EQ((*resized_column_variant)[1].get(), "Hello"); - ASSERT_EQ((*resized_column_variant)[3].get(), 43); + ASSERT_EQ((*resized_column_variant)[0].safeGet(), 42); + ASSERT_EQ((*resized_column_variant)[1].safeGet(), "Hello"); + ASSERT_EQ((*resized_column_variant)[3].safeGet(), 43); } TEST(ColumnVariant, CloneResizedGeneral2) @@ -367,7 +367,7 @@ TEST(ColumnVariant, CloneResizedGeneral2) ASSERT_EQ(discriminators[2], ColumnVariant::NULL_DISCRIMINATOR); const auto & offsets = resized_column_variant->getOffsets(); ASSERT_EQ(offsets[0], 0); - ASSERT_EQ((*resized_column_variant)[0].get(), 42); + ASSERT_EQ((*resized_column_variant)[0].safeGet(), 42); } TEST(ColumnVariant, CloneResizedGeneral3) @@ -405,10 +405,10 @@ TEST(ColumnVariant, CloneResizedGeneral3) ASSERT_EQ(offsets[1], 0); ASSERT_EQ(offsets[2], 1); ASSERT_EQ(offsets[3], 1); - ASSERT_EQ((*resized_column_variant)[0].get(), 42); - ASSERT_EQ((*resized_column_variant)[1].get(), "Hello"); - ASSERT_EQ((*resized_column_variant)[2].get(), "World"); - ASSERT_EQ((*resized_column_variant)[3].get(), 43); + ASSERT_EQ((*resized_column_variant)[0].safeGet(), 42); + ASSERT_EQ((*resized_column_variant)[1].safeGet(), "Hello"); + ASSERT_EQ((*resized_column_variant)[2].safeGet(), "World"); + ASSERT_EQ((*resized_column_variant)[3].safeGet(), 43); } MutableColumnPtr createDiscriminators2() @@ -465,7 +465,7 @@ TEST(ColumnVariant, InsertFrom) auto column_from = createVariantColumn2(change_order); column_to->insertFrom(*column_from, 3); ASSERT_EQ(column_to->globalDiscriminatorAt(5), 0); - ASSERT_EQ((*column_to)[5].get(), 43); + ASSERT_EQ((*column_to)[5].safeGet(), 43); } } @@ -478,8 +478,8 @@ TEST(ColumnVariant, InsertRangeFromOneColumnNoNulls) column_to->insertRangeFrom(*column_from, 2, 2); 
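The assertions in these ColumnVariant tests exercise the column's discriminators/offsets layout (including local vs. global discriminator order). As a rough standalone sketch of that layout, using toy types rather than the real ColumnVariant API, one sub-column per variant plus a per-row discriminator and offset looks roughly like this:

// Toy sketch of a variant column layout: a discriminator selects the variant sub-column
// for each row, and an offset locates the row inside that sub-column. Illustrative only.
#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct ToyVariantColumn
{
    static constexpr uint8_t NULL_DISCRIMINATOR = 255;
    std::vector<uint8_t> discriminators;   // which variant holds row i (255 = NULL)
    std::vector<size_t> offsets;           // position of row i inside its variant
    std::vector<int64_t> ints;             // variant 0
    std::vector<std::string> strings;      // variant 1

    void insertInt(int64_t x) { discriminators.push_back(0); offsets.push_back(ints.size()); ints.push_back(x); }
    void insertString(std::string s) { discriminators.push_back(1); offsets.push_back(strings.size()); strings.push_back(std::move(s)); }
    void insertNull() { discriminators.push_back(NULL_DISCRIMINATOR); offsets.push_back(0); }
};

int main()
{
    ToyVariantColumn col;
    col.insertInt(42);
    col.insertString("Hello");
    col.insertNull();
    assert(col.discriminators[0] == 0 && col.ints[col.offsets[0]] == 42);
    assert(col.discriminators[1] == 1 && col.strings[col.offsets[1]] == "Hello");
    assert(col.discriminators[2] == ToyVariantColumn::NULL_DISCRIMINATOR);
}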
ASSERT_EQ(column_to->globalDiscriminatorAt(7), 0); ASSERT_EQ(column_to->globalDiscriminatorAt(8), 0); - ASSERT_EQ((*column_to)[7].get(), 2); - ASSERT_EQ((*column_to)[8].get(), 3); + ASSERT_EQ((*column_to)[7].safeGet(), 2); + ASSERT_EQ((*column_to)[8].safeGet(), 3); } } @@ -494,9 +494,9 @@ TEST(ColumnVariant, InsertRangeFromGeneral) ASSERT_EQ(column_to->globalDiscriminatorAt(6), ColumnVariant::NULL_DISCRIMINATOR); ASSERT_EQ(column_to->globalDiscriminatorAt(7), 0); ASSERT_EQ(column_to->globalDiscriminatorAt(8), 1); - ASSERT_EQ((*column_to)[5].get(), "Hello"); - ASSERT_EQ((*column_to)[7].get(), 43); - ASSERT_EQ((*column_to)[8].get(), "World"); + ASSERT_EQ((*column_to)[5].safeGet(), "Hello"); + ASSERT_EQ((*column_to)[7].safeGet(), 43); + ASSERT_EQ((*column_to)[8].safeGet(), "World"); } } @@ -509,8 +509,8 @@ TEST(ColumnVariant, InsertManyFrom) column_to->insertManyFrom(*column_from, 3, 2); ASSERT_EQ(column_to->globalDiscriminatorAt(5), 0); ASSERT_EQ(column_to->globalDiscriminatorAt(6), 0); - ASSERT_EQ((*column_to)[5].get(), 43); - ASSERT_EQ((*column_to)[6].get(), 43); + ASSERT_EQ((*column_to)[5].safeGet(), 43); + ASSERT_EQ((*column_to)[6].safeGet(), 43); } } @@ -520,8 +520,8 @@ TEST(ColumnVariant, PopBackOneColumnNoNulls) column->popBack(3); ASSERT_EQ(column->size(), 2); ASSERT_EQ(column->getVariantByLocalDiscriminator(0).size(), 2); - ASSERT_EQ((*column)[0].get(), 0); - ASSERT_EQ((*column)[1].get(), 1); + ASSERT_EQ((*column)[0].safeGet(), 0); + ASSERT_EQ((*column)[1].safeGet(), 1); } TEST(ColumnVariant, PopBackGeneral) @@ -531,8 +531,8 @@ TEST(ColumnVariant, PopBackGeneral) ASSERT_EQ(column->size(), 3); ASSERT_EQ(column->getVariantByLocalDiscriminator(0).size(), 1); ASSERT_EQ(column->getVariantByLocalDiscriminator(1).size(), 1); - ASSERT_EQ((*column)[0].get(), 42); - ASSERT_EQ((*column)[1].get(), "Hello"); + ASSERT_EQ((*column)[0].safeGet(), 42); + ASSERT_EQ((*column)[1].safeGet(), "Hello"); ASSERT_TRUE((*column)[2].isNull()); } @@ -545,8 +545,8 @@ TEST(ColumnVariant, FilterOneColumnNoNulls) filter.push_back(1); auto filtered_column = column->filter(filter, -1); ASSERT_EQ(filtered_column->size(), 2); - ASSERT_EQ((*filtered_column)[0].get(), 0); - ASSERT_EQ((*filtered_column)[1].get(), 2); + ASSERT_EQ((*filtered_column)[0].safeGet(), 0); + ASSERT_EQ((*filtered_column)[1].safeGet(), 2); } TEST(ColumnVariant, FilterGeneral) @@ -562,7 +562,7 @@ TEST(ColumnVariant, FilterGeneral) filter.push_back(0); auto filtered_column = column->filter(filter, -1); ASSERT_EQ(filtered_column->size(), 3); - ASSERT_EQ((*filtered_column)[0].get(), "Hello"); + ASSERT_EQ((*filtered_column)[0].safeGet(), "Hello"); ASSERT_TRUE((*filtered_column)[1].isNull()); ASSERT_TRUE((*filtered_column)[2].isNull()); } @@ -577,9 +577,9 @@ TEST(ColumnVariant, PermuteAndIndexOneColumnNoNulls) permutation.push_back(0); auto permuted_column = column->permute(permutation, 3); ASSERT_EQ(permuted_column->size(), 3); - ASSERT_EQ((*permuted_column)[0].get(), 1); - ASSERT_EQ((*permuted_column)[1].get(), 3); - ASSERT_EQ((*permuted_column)[2].get(), 2); + ASSERT_EQ((*permuted_column)[0].safeGet(), 1); + ASSERT_EQ((*permuted_column)[1].safeGet(), 3); + ASSERT_EQ((*permuted_column)[2].safeGet(), 2); auto index = ColumnUInt64::create(); index->getData().push_back(1); @@ -588,9 +588,9 @@ TEST(ColumnVariant, PermuteAndIndexOneColumnNoNulls) index->getData().push_back(0); auto indexed_column = column->index(*index, 3); ASSERT_EQ(indexed_column->size(), 3); - ASSERT_EQ((*indexed_column)[0].get(), 1); - ASSERT_EQ((*indexed_column)[1].get(), 3); - 
ASSERT_EQ((*indexed_column)[2].get(), 2); + ASSERT_EQ((*indexed_column)[0].safeGet(), 1); + ASSERT_EQ((*indexed_column)[1].safeGet(), 3); + ASSERT_EQ((*indexed_column)[2].safeGet(), 2); } TEST(ColumnVariant, PermuteGeneral) @@ -603,9 +603,9 @@ TEST(ColumnVariant, PermuteGeneral) permutation.push_back(5); auto permuted_column = column->permute(permutation, 4); ASSERT_EQ(permuted_column->size(), 4); - ASSERT_EQ((*permuted_column)[0].get(), 43); - ASSERT_EQ((*permuted_column)[1].get(), "World"); - ASSERT_EQ((*permuted_column)[2].get(), "Hello"); + ASSERT_EQ((*permuted_column)[0].safeGet(), 43); + ASSERT_EQ((*permuted_column)[1].safeGet(), "World"); + ASSERT_EQ((*permuted_column)[2].safeGet(), "Hello"); ASSERT_TRUE((*permuted_column)[3].isNull()); } @@ -618,12 +618,12 @@ TEST(ColumnVariant, ReplicateOneColumnNoNull) offsets.push_back(6); auto replicated_column = column->replicate(offsets); ASSERT_EQ(replicated_column->size(), 6); - ASSERT_EQ((*replicated_column)[0].get(), 1); - ASSERT_EQ((*replicated_column)[1].get(), 1); - ASSERT_EQ((*replicated_column)[2].get(), 1); - ASSERT_EQ((*replicated_column)[3].get(), 2); - ASSERT_EQ((*replicated_column)[4].get(), 2); - ASSERT_EQ((*replicated_column)[5].get(), 2); + ASSERT_EQ((*replicated_column)[0].safeGet(), 1); + ASSERT_EQ((*replicated_column)[1].safeGet(), 1); + ASSERT_EQ((*replicated_column)[2].safeGet(), 1); + ASSERT_EQ((*replicated_column)[3].safeGet(), 2); + ASSERT_EQ((*replicated_column)[4].safeGet(), 2); + ASSERT_EQ((*replicated_column)[5].safeGet(), 2); } TEST(ColumnVariant, ReplicateGeneral) @@ -637,9 +637,9 @@ TEST(ColumnVariant, ReplicateGeneral) offsets.push_back(7); auto replicated_column = column->replicate(offsets); ASSERT_EQ(replicated_column->size(), 7); - ASSERT_EQ((*replicated_column)[0].get(), 42); - ASSERT_EQ((*replicated_column)[1].get(), "Hello"); - ASSERT_EQ((*replicated_column)[2].get(), "Hello"); + ASSERT_EQ((*replicated_column)[0].safeGet(), 42); + ASSERT_EQ((*replicated_column)[1].safeGet(), "Hello"); + ASSERT_EQ((*replicated_column)[2].safeGet(), "Hello"); ASSERT_TRUE((*replicated_column)[3].isNull()); ASSERT_TRUE((*replicated_column)[4].isNull()); ASSERT_TRUE((*replicated_column)[5].isNull()); @@ -657,13 +657,13 @@ TEST(ColumnVariant, ScatterOneColumnNoNulls) selector.push_back(1); auto columns = column->scatter(3, selector); ASSERT_EQ(columns[0]->size(), 2); - ASSERT_EQ((*columns[0])[0].get(), 0); - ASSERT_EQ((*columns[0])[1].get(), 3); + ASSERT_EQ((*columns[0])[0].safeGet(), 0); + ASSERT_EQ((*columns[0])[1].safeGet(), 3); ASSERT_EQ(columns[1]->size(), 2); - ASSERT_EQ((*columns[1])[0].get(), 1); - ASSERT_EQ((*columns[1])[1].get(), 4); + ASSERT_EQ((*columns[1])[0].safeGet(), 1); + ASSERT_EQ((*columns[1])[1].safeGet(), 4); ASSERT_EQ(columns[2]->size(), 1); - ASSERT_EQ((*columns[2])[0].get(), 2); + ASSERT_EQ((*columns[2])[0].safeGet(), 2); } TEST(ColumnVariant, ScatterGeneral) @@ -680,12 +680,12 @@ TEST(ColumnVariant, ScatterGeneral) auto columns = column->scatter(3, selector); ASSERT_EQ(columns[0]->size(), 3); - ASSERT_EQ((*columns[0])[0].get(), 42); - ASSERT_EQ((*columns[0])[1].get(), "Hello"); - ASSERT_EQ((*columns[0])[2].get(), 43); + ASSERT_EQ((*columns[0])[0].safeGet(), 42); + ASSERT_EQ((*columns[0])[1].safeGet(), "Hello"); + ASSERT_EQ((*columns[0])[2].safeGet(), 43); ASSERT_EQ(columns[1]->size(), 2); - ASSERT_EQ((*columns[1])[0].get(), "World"); - ASSERT_EQ((*columns[1])[1].get(), 44); + ASSERT_EQ((*columns[1])[0].safeGet(), "World"); + ASSERT_EQ((*columns[1])[1].safeGet(), 44); ASSERT_EQ(columns[2]->size(), 2); 
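The bulk of this hunk swaps the tests from Field::get to Field::safeGet, i.e. to the accessor that verifies the stored type before returning it. A rough standalone analogy, using std::variant rather than DB::Field so only the checked/unchecked distinction carries over, is the difference between std::get and raw pointer-style access:

// Analogy only (std::variant, not DB::Field): checked access fails loudly on a type
// mismatch instead of returning a reference to the wrong representation.
#include <cassert>
#include <cstdint>
#include <string>
#include <variant>

int main()
{
    std::variant<int64_t, std::string> field = std::string("Hello");

    // Checked access throws std::bad_variant_access when the stored type differs.
    bool threw = false;
    try { (void)std::get<int64_t>(field); } catch (const std::bad_variant_access &) { threw = true; }
    assert(threw);

    // get_if reports the mismatch as nullptr rather than handing back a bogus reference.
    assert(std::get_if<int64_t>(&field) == nullptr);
    assert(*std::get_if<std::string>(&field) == "Hello");
}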
ASSERT_TRUE((*columns[2])[0].isNull()); ASSERT_TRUE((*columns[2])[1].isNull()); diff --git a/src/Columns/tests/gtest_low_cardinality.cpp b/src/Columns/tests/gtest_low_cardinality.cpp index 5e01279b7df..ce16d2cadb1 100644 --- a/src/Columns/tests/gtest_low_cardinality.cpp +++ b/src/Columns/tests/gtest_low_cardinality.cpp @@ -20,13 +20,13 @@ void testLowCardinalityNumberInsert(const DataTypePtr & data_type) Field value; column->get(0, value); - ASSERT_EQ(value.get(), 15); + ASSERT_EQ(value.safeGet(), 15); column->get(1, value); - ASSERT_EQ(value.get(), 20); + ASSERT_EQ(value.safeGet(), 20); column->get(2, value); - ASSERT_EQ(value.get(), 25); + ASSERT_EQ(value.safeGet(), 25); } TEST(ColumnLowCardinality, Insert) diff --git a/src/Common/AsynchronousMetrics.cpp b/src/Common/AsynchronousMetrics.cpp index 56e7c4f3405..9b6a7428411 100644 --- a/src/Common/AsynchronousMetrics.cpp +++ b/src/Common/AsynchronousMetrics.cpp @@ -1,18 +1,24 @@ -#include #include -#include -#include -#include -#include -#include -#include + #include #include +#include +#include #include #include #include #include +#include +#include +#include +#include +#include +#include + +#include + #include +#include #include "config.h" @@ -52,6 +58,12 @@ static std::unique_ptr openFileIfExists(const std::stri return {}; } +static void openCgroupv2MetricFile(const std::string & filename, std::optional & out) +{ + if (auto path = getCgroupsV2PathContainingFile(filename)) + openFileIfExists((path.value() + filename).c_str(), out); +}; + #endif @@ -63,21 +75,15 @@ AsynchronousMetrics::AsynchronousMetrics( , protocol_server_metrics_func(protocol_server_metrics_func_) { #if defined(OS_LINUX) - openFileIfExists("/proc/meminfo", meminfo); - openFileIfExists("/proc/loadavg", loadavg); - openFileIfExists("/proc/stat", proc_stat); openFileIfExists("/proc/cpuinfo", cpuinfo); openFileIfExists("/proc/sys/fs/file-nr", file_nr); - openFileIfExists("/proc/uptime", uptime); openFileIfExists("/proc/net/dev", net_dev); /// CGroups v2 - openFileIfExists("/sys/fs/cgroup/memory.max", cgroupmem_limit_in_bytes); - if (cgroupmem_limit_in_bytes) - { - openFileIfExists("/sys/fs/cgroup/memory.current", cgroupmem_usage_in_bytes); - } - openFileIfExists("/sys/fs/cgroup/cpu.max", cgroupcpu_max); + openCgroupv2MetricFile("memory.max", cgroupmem_limit_in_bytes); + openCgroupv2MetricFile("memory.current", cgroupmem_usage_in_bytes); + openCgroupv2MetricFile("cpu.max", cgroupcpu_max); + openCgroupv2MetricFile("cpu.stat", cgroupcpu_stat); /// CGroups v1 if (!cgroupmem_limit_in_bytes) @@ -90,6 +96,21 @@ AsynchronousMetrics::AsynchronousMetrics( openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_period_us", cgroupcpu_cfs_period); openFileIfExists("/sys/fs/cgroup/cpu/cpu.cfs_quota_us", cgroupcpu_cfs_quota); } + if (!cgroupcpu_stat) + openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); + + if (!cgroupcpu_stat && !cgroupcpuacct_stat) + { + /// The following metrics are not cgroup-aware and we've found cgroup-specific metric files for the similar metrics, + /// so we're better not reporting them at all to avoid confusion + openFileIfExists("/proc/loadavg", loadavg); + openFileIfExists("/proc/stat", proc_stat); + openFileIfExists("/proc/uptime", uptime); + } + + /// The same story for memory metrics + if (!cgroupmem_limit_in_bytes) + openFileIfExists("/proc/meminfo", meminfo); openFileIfExists("/proc/sys/vm/max_map_count", vm_max_map_count); openFileIfExists("/proc/self/maps", vm_maps); @@ -570,6 +591,151 @@ 
AsynchronousMetrics::NetworkInterfaceStatValues::operator-(const AsynchronousMet #endif +#if defined(OS_LINUX) +void AsynchronousMetrics::applyCPUMetricsUpdate( + AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier) +{ + new_values["OSUserTime" + cpu_suffix] + = {delta_values.user * multiplier, + "The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the " + "host machine, not just clickhouse-server." + " This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline " + "stalls, branch mispredictions, running another SMT core)." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSNiceTime" + cpu_suffix] + = {delta_values.nice * multiplier, + "The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all " + "the processes on the host machine, not just clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSSystemTime" + cpu_suffix] + = {delta_values.system * multiplier, + "The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIdleTime" + cpu_suffix] + = {delta_values.idle * multiplier, + "The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This " + "is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." + " This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline " + "stalls, branch mispredictions, running another SMT core)." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIOWaitTime" + cpu_suffix] + = {delta_values.iowait * multiplier, + "The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as " + "the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just " + "clickhouse-server." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSIrqTime" + cpu_suffix] + = {delta_values.irq * multiplier, + "The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " A high number of this metric may indicate hardware misconfiguration or a very high network load." + " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSSoftIrqTime" + cpu_suffix] + = {delta_values.softirq * multiplier, + "The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the " + "processes on the host machine, not just clickhouse-server." + " A high number of this metric may indicate inefficient software running on the system." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSStealTime" + cpu_suffix] + = {delta_values.steal * multiplier, + "The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide " + "metric, it includes all the processes on the host machine, not just clickhouse-server." + " Not every virtualized environments present this metric, and most of them don't." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSGuestTime" + cpu_suffix] + = {delta_values.guest * multiplier, + "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man " + "procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." + " This metric is irrelevant for ClickHouse, but still exists for completeness." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; + new_values["OSGuestNiceTime" + cpu_suffix] + = {delta_values.guest_nice * multiplier, + "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest " + "was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host " + "machine, not just clickhouse-server." + " This metric is irrelevant for ClickHouse, but still exists for completeness." + " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across " + "them [0..num cores]."}; +} + +void AsynchronousMetrics::applyNormalizedCPUMetricsUpdate( + AsynchronousMetricValues & new_values, double num_cpus_to_normalize, const ProcStatValuesCPU & delta_values_all_cpus, double multiplier) +{ + chassert(num_cpus_to_normalize); + + new_values["OSUserTimeNormalized"] + = {delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, + "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSNiceTimeNormalized"] + = {delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, + "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." 
+ " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSSystemTimeNormalized"] + = {delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, + "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIdleTimeNormalized"] + = {delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIOWaitTimeNormalized"] + = {delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSIrqTimeNormalized"] + = {delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, + "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of " + "the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSSoftIrqTimeNormalized"] + = {delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, + "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval " + "regardless of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSStealTimeNormalized"] + = {delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, + "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSGuestTimeNormalized"] + = {delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, + "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless " + "of the number of cores." 
+ " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; + new_values["OSGuestNiceTimeNormalized"] + = {delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, + "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval " + "regardless of the number of cores." + " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is " + "non-uniform, and still get the average resource utilization metric."}; +} +#endif + void AsynchronousMetrics::update(TimePoint update_time, bool force_update) { Stopwatch watch; @@ -831,7 +997,68 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) new_values["CGroupMaxCPU"] = { max_cpu_cgroups, "The maximum number of CPU cores according to CGroups."}; } - if (proc_stat) + if (cgroupcpu_stat || cgroupcpuacct_stat) + { + try + { + ReadBufferFromFilePRead & in = cgroupcpu_stat ? *cgroupcpu_stat : *cgroupcpuacct_stat; + ProcStatValuesCPU current_values{}; + + /// We re-read the file from the beginning each time + in.rewind(); + + while (!in.eof()) + { + String name; + readStringUntilWhitespace(name, in); + skipWhitespaceIfAny(in); + + /// `user_usec` for cgroup v2 and `user` for cgroup v1 + if (name.starts_with("user")) + { + readText(current_values.user, in); + skipToNextLineOrEOF(in); + } + /// `system_usec` for cgroup v2 and `system` for cgroup v1 + else if (name.starts_with("system")) + { + readText(current_values.system, in); + skipToNextLineOrEOF(in); + } + else + skipToNextLineOrEOF(in); + } + + if (!first_run) + { + auto get_clock_ticks = [&]() + { + if (auto hz = sysconf(_SC_CLK_TCK); hz != -1) + return hz; + else + throw ErrnoException(ErrorCodes::CANNOT_SYSCONF, "Cannot call 'sysconf' to obtain system HZ"); + }; + const auto cgroup_version_specific_divisor = cgroupcpu_stat ? 1e6 : get_clock_ticks(); + const double multiplier = 1.0 / cgroup_version_specific_divisor + / (std::chrono::duration_cast(time_since_previous_update).count() / 1e9); + + const ProcStatValuesCPU delta_values = current_values - proc_stat_values_all_cpus; + applyCPUMetricsUpdate(new_values, /*cpu_suffix=*/"", delta_values, multiplier); + if (max_cpu_cgroups > 0) + applyNormalizedCPUMetricsUpdate(new_values, max_cpu_cgroups, delta_values, multiplier); + } + + proc_stat_values_all_cpus = current_values; + } + catch (...) + { + tryLogCurrentException(__PRETTY_FUNCTION__); + openCgroupv2MetricFile("cpu.stat", cgroupcpu_stat); + if (!cgroupcpu_stat) + openFileIfExists("/sys/fs/cgroup/cpuacct/cpuacct.stat", cgroupcpuacct_stat); + } + } + else if (proc_stat) { try { @@ -886,43 +1113,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) else delta_values_all_cpus = delta_values; - new_values["OSUserTime" + cpu_suffix] = { delta_values.user * multiplier, - "The ratio of time the CPU core was running userspace code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This includes also the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)." - " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSNiceTime" + cpu_suffix] = { delta_values.nice * multiplier, - "The ratio of time the CPU core was running userspace code with higher priority. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSSystemTime" + cpu_suffix] = { delta_values.system * multiplier, - "The ratio of time the CPU core was running OS kernel (system) code. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIdleTime" + cpu_suffix] = { delta_values.idle * multiplier, - "The ratio of time the CPU core was idle (not even ready to run a process waiting for IO) from the OS kernel standpoint. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This does not include the time when the CPU was under-utilized due to the reasons internal to the CPU (memory loads, pipeline stalls, branch mispredictions, running another SMT core)." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIOWaitTime" + cpu_suffix] = { delta_values.iowait * multiplier, - "The ratio of time the CPU core was not running the code but when the OS kernel did not run any other process on this CPU as the processes were waiting for IO. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSIrqTime" + cpu_suffix] = { delta_values.irq * multiplier, - "The ratio of time spent for running hardware interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " A high number of this metric may indicate hardware misconfiguration or a very high network load." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSSoftIrqTime" + cpu_suffix] = { delta_values.softirq * multiplier, - "The ratio of time spent for running software interrupt requests on the CPU. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " A high number of this metric may indicate inefficient software running on the system." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSStealTime" + cpu_suffix] = { delta_values.steal * multiplier, - "The ratio of time spent in other operating systems by the CPU when running in a virtualized environment. This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " Not every virtualized environments present this metric, and most of them don't." - " The value for a single CPU core will be in the interval [0..1]. 
The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSGuestTime" + cpu_suffix] = { delta_values.guest * multiplier, - "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This metric is irrelevant for ClickHouse, but still exists for completeness." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; - new_values["OSGuestNiceTime" + cpu_suffix] = { delta_values.guest_nice * multiplier, - "The ratio of time spent running a virtual CPU for guest operating systems under the control of the Linux kernel, when a guest was set to a higher priority (See `man procfs`). This is a system-wide metric, it includes all the processes on the host machine, not just clickhouse-server." - " This metric is irrelevant for ClickHouse, but still exists for completeness." - " The value for a single CPU core will be in the interval [0..1]. The value for all CPU cores is calculated as a sum across them [0..num cores]."}; + applyCPUMetricsUpdate(new_values, cpu_suffix, delta_values, multiplier); } prev_values = current_values; @@ -978,38 +1169,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) Float64 num_cpus_to_normalize = max_cpu_cgroups > 0 ? max_cpu_cgroups : num_cpus; if (num_cpus_to_normalize > 0) - { - new_values["OSUserTimeNormalized"] = { delta_values_all_cpus.user * multiplier / num_cpus_to_normalize, - "The value is similar to `OSUserTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSNiceTimeNormalized"] = { delta_values_all_cpus.nice * multiplier / num_cpus_to_normalize, - "The value is similar to `OSNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSystemTimeNormalized"] = { delta_values_all_cpus.system * multiplier / num_cpus_to_normalize, - "The value is similar to `OSSystemTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIdleTimeNormalized"] = { delta_values_all_cpus.idle * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIdleTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
- " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIOWaitTimeNormalized"] = { delta_values_all_cpus.iowait * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIOWaitTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSIrqTimeNormalized"] = { delta_values_all_cpus.irq * multiplier / num_cpus_to_normalize, - "The value is similar to `OSIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSSoftIrqTimeNormalized"] = { delta_values_all_cpus.softirq * multiplier / num_cpus_to_normalize, - "The value is similar to `OSSoftIrqTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSStealTimeNormalized"] = { delta_values_all_cpus.steal * multiplier / num_cpus_to_normalize, - "The value is similar to `OSStealTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestTimeNormalized"] = { delta_values_all_cpus.guest * multiplier / num_cpus_to_normalize, - "The value is similar to `OSGuestTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." - " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - new_values["OSGuestNiceTimeNormalized"] = { delta_values_all_cpus.guest_nice * multiplier / num_cpus_to_normalize, - "The value is similar to `OSGuestNiceTime` but divided to the number of CPU cores to be measured in the [0..1] interval regardless of the number of cores." 
- " This allows you to average the values of this metric across multiple servers in a cluster even if the number of cores is non-uniform, and still get the average resource utilization metric."}; - } + applyNormalizedCPUMetricsUpdate(new_values, num_cpus_to_normalize, delta_values_all_cpus, multiplier); } proc_stat_values_other = current_other_values; @@ -1042,8 +1202,7 @@ void AsynchronousMetrics::update(TimePoint update_time, bool force_update) tryLogCurrentException(__PRETTY_FUNCTION__); } } - - if (meminfo) + else if (meminfo) { try { diff --git a/src/Common/AsynchronousMetrics.h b/src/Common/AsynchronousMetrics.h index 04d0319e35b..78d07ef4b6c 100644 --- a/src/Common/AsynchronousMetrics.h +++ b/src/Common/AsynchronousMetrics.h @@ -126,6 +126,8 @@ private: std::optional cgroupcpu_cfs_period TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_cfs_quota TSA_GUARDED_BY(data_mutex); std::optional cgroupcpu_max TSA_GUARDED_BY(data_mutex); + std::optional cgroupcpu_stat TSA_GUARDED_BY(data_mutex); + std::optional cgroupcpuacct_stat TSA_GUARDED_BY(data_mutex); std::optional vm_max_map_count TSA_GUARDED_BY(data_mutex); std::optional vm_maps TSA_GUARDED_BY(data_mutex); @@ -221,6 +223,16 @@ private: void openBlockDevices(); void openSensorsChips(); void openEDAC(); + + void applyCPUMetricsUpdate( + AsynchronousMetricValues & new_values, const std::string & cpu_suffix, const ProcStatValuesCPU & delta_values, double multiplier); + + void applyNormalizedCPUMetricsUpdate( + AsynchronousMetricValues & new_values, + double num_cpus_to_normalize, + const ProcStatValuesCPU & delta_values_all_cpus, + double multiplier); + #endif void run(); diff --git a/src/Common/CgroupsMemoryUsageObserver.cpp b/src/Common/CgroupsMemoryUsageObserver.cpp index ef8bdfc1823..83b04360164 100644 --- a/src/Common/CgroupsMemoryUsageObserver.cpp +++ b/src/Common/CgroupsMemoryUsageObserver.cpp @@ -144,31 +144,6 @@ private: /// - I did not test what happens if a host has v1 and v2 simultaneously enabled. I believe such /// systems existed only for a short transition period. -std::optional getCgroupsV2Path() -{ - if (!cgroupsV2Enabled()) - return {}; - - if (!cgroupsV2MemoryControllerEnabled()) - return {}; - - fs::path current_cgroup = cgroupV2PathOfProcess(); - if (current_cgroup.empty()) - return {}; - - /// Return the bottom-most nested current memory file. If there is no such file at the current - /// level, try again at the parent level as memory settings are inherited. 
- while (current_cgroup != default_cgroups_mount.parent_path()) - { - const auto current_path = current_cgroup / "memory.current"; - const auto stat_path = current_cgroup / "memory.stat"; - if (fs::exists(current_path) && fs::exists(stat_path)) - return {current_cgroup}; - current_cgroup = current_cgroup.parent_path(); - } - return {}; -} - std::optional getCgroupsV1Path() { auto path = default_cgroups_mount / "memory/memory.stat"; @@ -179,7 +154,7 @@ std::optional getCgroupsV1Path() std::pair getCgroupsPath() { - auto v2_path = getCgroupsV2Path(); + auto v2_path = getCgroupsV2PathContainingFile("memory.current"); if (v2_path.has_value()) return {*v2_path, CgroupsMemoryUsageObserver::CgroupsVersion::V2}; diff --git a/src/Common/CollectionOfDerived.h b/src/Common/CollectionOfDerived.h index 9f80ff727b4..bcbcc36c67a 100644 --- a/src/Common/CollectionOfDerived.h +++ b/src/Common/CollectionOfDerived.h @@ -168,7 +168,7 @@ private: records.emplace(it, type_idx, item); } - Records::const_iterator getImpl(std::type_index type_idx) const + typename Records::const_iterator getImpl(std::type_index type_idx) const { auto it = std::lower_bound(records.cbegin(), records.cend(), type_idx); diff --git a/src/Common/CurrentMetrics.cpp b/src/Common/CurrentMetrics.cpp index b6dd14d292c..67890568941 100644 --- a/src/Common/CurrentMetrics.cpp +++ b/src/Common/CurrentMetrics.cpp @@ -307,7 +307,7 @@ M(FilteringMarksWithPrimaryKey, "Number of threads currently doing filtering of mark ranges by the primary key") \ M(FilteringMarksWithSecondaryKeys, "Number of threads currently doing filtering of mark ranges by secondary keys") \ \ - M(S3DiskNoKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ + M(DiskS3NoSuchKeyErrors, "The number of `NoSuchKey` errors that occur when reading data from S3 cloud storage through ClickHouse disks.") \ #ifdef APPLY_FOR_EXTERNAL_METRICS #define APPLY_FOR_METRICS(M) APPLY_FOR_BUILTIN_METRICS(M) APPLY_FOR_EXTERNAL_METRICS(M) diff --git a/src/Common/FailPoint.cpp b/src/Common/FailPoint.cpp index 0b1ec552d43..b2fcbc77c56 100644 --- a/src/Common/FailPoint.cpp +++ b/src/Common/FailPoint.cpp @@ -7,6 +7,8 @@ #include #include +#include "config.h" + namespace DB { @@ -15,7 +17,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; }; -#if FIU_ENABLE +#if USE_LIBFIU static struct InitFiu { InitFiu() @@ -135,7 +137,7 @@ void FailPointInjection::pauseFailPoint(const String & fail_point_name) void FailPointInjection::enableFailPoint(const String & fail_point_name) { -#if FIU_ENABLE +#if USE_LIBFIU #define SUB_M(NAME, flags, pause) \ if (fail_point_name == FailPoints::NAME) \ { \ diff --git a/src/Common/FailPoint.h b/src/Common/FailPoint.h index b3e1214d597..1af13d08553 100644 --- a/src/Common/FailPoint.h +++ b/src/Common/FailPoint.h @@ -1,17 +1,16 @@ #pragma once -#include "config.h" #include #include #include +#include "config.h" + #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdocumentation" #pragma clang diagnostic ignored "-Wreserved-macro-identifier" - -#include -#include - +# include +# include #pragma clang diagnostic pop #include diff --git a/src/Common/FieldBinaryEncoding.cpp b/src/Common/FieldBinaryEncoding.cpp index 6c1a8496fe6..23263c988c3 100644 --- a/src/Common/FieldBinaryEncoding.cpp +++ b/src/Common/FieldBinaryEncoding.cpp @@ -208,7 +208,7 @@ void FieldVisitorEncodeBinary::operator() (const Map & x, WriteBuffer & buf) con writeVarUInt(size, buf); for (size_t i = 0; i < size; ++i) { - 
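Both the helper removed here and the getCgroupsV2PathContainingFile call that replaces it further down rely on the same idea: start at the process's cgroup and walk up toward the mount point until the requested file exists. A minimal standalone sketch of that walk, with illustrative paths and without the cgroups-v2 enablement checks the real helpers perform:

// Illustrative sketch of "walk up the cgroup hierarchy until a file exists".
// The mount path and starting cgroup are assumptions, not the real helper's logic.
#include <filesystem>
#include <iostream>
#include <optional>
#include <string>

namespace fs = std::filesystem;

std::optional<fs::path> findCgroupFileUpwards(fs::path current_cgroup, const std::string & file_name)
{
    const fs::path mount_root = "/sys/fs/cgroup";
    while (current_cgroup.string().starts_with(mount_root.string()))
    {
        if (fs::exists(current_cgroup / file_name))
            return current_cgroup;  // bottom-most level that exposes the file
        current_cgroup = current_cgroup.parent_path();
    }
    return std::nullopt;
}

int main()
{
    if (auto path = findCgroupFileUpwards("/sys/fs/cgroup/user.slice/myservice", "memory.current"))
        std::cout << path->string() << '\n';
}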
const Tuple & key_and_value = x[i].get(); + const Tuple & key_and_value = x[i].safeGet(); Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[0]); Field::dispatch([&buf] (const auto & value) { FieldVisitorEncodeBinary()(value, buf); }, key_and_value[1]); } diff --git a/src/Common/FieldVisitorSum.cpp b/src/Common/FieldVisitorSum.cpp index b825f188586..af9503ac046 100644 --- a/src/Common/FieldVisitorSum.cpp +++ b/src/Common/FieldVisitorSum.cpp @@ -19,7 +19,7 @@ bool FieldVisitorSum::operator() (UInt64 & x) const return x != 0; } -bool FieldVisitorSum::operator() (Float64 & x) const { x += rhs.get(); return x != 0; } +bool FieldVisitorSum::operator() (Float64 & x) const { x += rhs.safeGet(); return x != 0; } bool FieldVisitorSum::operator() (Null &) const { diff --git a/src/Common/FieldVisitorSum.h b/src/Common/FieldVisitorSum.h index cbb4c4a1de3..d28676b5093 100644 --- a/src/Common/FieldVisitorSum.h +++ b/src/Common/FieldVisitorSum.h @@ -37,7 +37,7 @@ public: template bool operator() (DecimalField & x) const { - x += rhs.get>(); + x += rhs.safeGet>(); return x.getValue() != T(0); } diff --git a/src/Common/FieldVisitorToString.cpp b/src/Common/FieldVisitorToString.cpp index c4cb4266418..2148bac20d1 100644 --- a/src/Common/FieldVisitorToString.cpp +++ b/src/Common/FieldVisitorToString.cpp @@ -172,7 +172,7 @@ String FieldVisitorToString::operator() (const Object & x) const String convertFieldToString(const Field & field) { if (field.getType() == Field::Types::Which::String) - return field.get(); + return field.safeGet(); return applyVisitor(FieldVisitorToString(), field); } diff --git a/src/Common/HashTable/HashMap.h b/src/Common/HashTable/HashMap.h index a26797a687a..92621db5558 100644 --- a/src/Common/HashTable/HashMap.h +++ b/src/Common/HashTable/HashMap.h @@ -297,7 +297,7 @@ public: } /// Only inserts the value if key isn't already present - void ALWAYS_INLINE insertIfNotPresent(const Key & x, const Cell::Mapped & value) + void ALWAYS_INLINE insertIfNotPresent(const Key & x, const typename Cell::Mapped & value) { LookupResult it; bool inserted; diff --git a/src/Common/ProfileEvents.cpp b/src/Common/ProfileEvents.cpp index ccdce7ff584..d43d9fdcea8 100644 --- a/src/Common/ProfileEvents.cpp +++ b/src/Common/ProfileEvents.cpp @@ -209,8 +209,35 @@ \ M(Merge, "Number of launched background merges.") \ M(MergedRows, "Rows read for background merges. This is the number of rows before merge.") \ + M(MergedColumns, "Number of columns merged during the horizontal stage of merges.") \ + M(GatheredColumns, "Number of columns gathered during the vertical stage of merges.") \ M(MergedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for background merges. 
This is the number before merge.") \ - M(MergesTimeMilliseconds, "Total time spent for background merges.")\ + M(MergeTotalMilliseconds, "Total time spent for background merges") \ + M(MergeExecuteMilliseconds, "Total busy time spent for execution of background merges") \ + M(MergeHorizontalStageTotalMilliseconds, "Total time spent for horizontal stage of background merges") \ + M(MergeHorizontalStageExecuteMilliseconds, "Total busy time spent for execution of horizontal stage of background merges") \ + M(MergeVerticalStageTotalMilliseconds, "Total time spent for vertical stage of background merges") \ + M(MergeVerticalStageExecuteMilliseconds, "Total busy time spent for execution of vertical stage of background merges") \ + M(MergeProjectionStageTotalMilliseconds, "Total time spent for projection stage of background merges") \ + M(MergeProjectionStageExecuteMilliseconds, "Total busy time spent for execution of projection stage of background merges") \ + \ + M(MergingSortedMilliseconds, "Total time spent while merging sorted columns") \ + M(AggregatingSortedMilliseconds, "Total time spent while aggregating sorted columns") \ + M(CollapsingSortedMilliseconds, "Total time spent while collapsing sorted columns") \ + M(ReplacingSortedMilliseconds, "Total time spent while replacing sorted columns") \ + M(SummingSortedMilliseconds, "Total time spent while summing sorted columns") \ + M(VersionedCollapsingSortedMilliseconds, "Total time spent while version collapsing sorted columns") \ + M(GatheringColumnMilliseconds, "Total time spent while gathering columns for vertical merge") \ + \ + M(MutationTotalParts, "Number of total parts for which mutations tried to be applied") \ + M(MutationUntouchedParts, "Number of total parts for which mutations tried to be applied but which was completely skipped according to predicate") \ + M(MutatedRows, "Rows read for mutations. This is the number of rows before mutation") \ + M(MutatedUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) that was read for mutations. 
This is the number before mutation.") \ + M(MutationTotalMilliseconds, "Total time spent for mutations.") \ + M(MutationExecuteMilliseconds, "Total busy time spent for execution of mutations.") \ + M(MutationAllPartColumns, "Number of times when task to mutate all columns in part was created") \ + M(MutationSomePartColumns, "Number of times when task to mutate some columns in part was created") \ + M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections in mutations.") \ \ M(MergeTreeDataWriterRows, "Number of rows INSERTed to MergeTree tables.") \ M(MergeTreeDataWriterUncompressedBytes, "Uncompressed bytes (for columns as they stored in memory) INSERTed to MergeTree tables.") \ @@ -225,7 +252,6 @@ M(MergeTreeDataWriterProjectionsCalculationMicroseconds, "Time spent calculating projections") \ M(MergeTreeDataProjectionWriterSortingBlocksMicroseconds, "Time spent sorting blocks (for projection it might be a key different from table's sorting key)") \ M(MergeTreeDataProjectionWriterMergingBlocksMicroseconds, "Time spent merging blocks") \ - M(MutateTaskProjectionsCalculationMicroseconds, "Time spent calculating projections") \ \ M(InsertedWideParts, "Number of parts inserted in Wide format.") \ M(InsertedCompactParts, "Number of parts inserted in Compact format.") \ diff --git a/src/Common/QueryFuzzer.cpp b/src/Common/QueryFuzzer.cpp index 161c38f20e0..0b2f6c09b45 100644 --- a/src/Common/QueryFuzzer.cpp +++ b/src/Common/QueryFuzzer.cpp @@ -132,7 +132,7 @@ Field QueryFuzzer::fuzzField(Field field) if (type == Field::Types::String) { - auto & str = field.get(); + auto & str = field.safeGet(); UInt64 action = fuzz_rand() % 10; switch (action) { @@ -158,7 +158,7 @@ Field QueryFuzzer::fuzzField(Field field) } else if (type == Field::Types::Array) { - auto & arr = field.get(); + auto & arr = field.safeGet(); if (fuzz_rand() % 5 == 0 && !arr.empty()) { @@ -193,7 +193,7 @@ Field QueryFuzzer::fuzzField(Field field) } else if (type == Field::Types::Tuple) { - auto & arr = field.get(); + auto & arr = field.safeGet(); if (fuzz_rand() % 5 == 0 && !arr.empty()) { @@ -922,17 +922,17 @@ ASTPtr QueryFuzzer::fuzzLiteralUnderExpressionList(ASTPtr child) auto type = l->value.getType(); if (type == Field::Types::Which::String && fuzz_rand() % 7 == 0) { - String value = l->value.get(); + String value = l->value.safeGet(); child = makeASTFunction( "toFixedString", std::make_shared(value), std::make_shared(static_cast(value.size()))); } else if (type == Field::Types::Which::UInt64 && fuzz_rand() % 7 == 0) { - child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toUInt128" : "toUInt256", std::make_shared(l->value.get())); + child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toUInt128" : "toUInt256", std::make_shared(l->value.safeGet())); } else if (type == Field::Types::Which::Int64 && fuzz_rand() % 7 == 0) { - child = makeASTFunction(fuzz_rand() % 2 == 0 ? "toInt128" : "toInt256", std::make_shared(l->value.get())); + child = makeASTFunction(fuzz_rand() % 2 == 0 ? 
"toInt128" : "toInt256", std::make_shared(l->value.safeGet())); } else if (type == Field::Types::Which::Float64 && fuzz_rand() % 7 == 0) { @@ -940,22 +940,22 @@ ASTPtr QueryFuzzer::fuzzLiteralUnderExpressionList(ASTPtr child) if (decimal == 0) child = makeASTFunction( "toDecimal32", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 9))); else if (decimal == 1) child = makeASTFunction( "toDecimal64", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 18))); else if (decimal == 2) child = makeASTFunction( "toDecimal128", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 38))); else child = makeASTFunction( "toDecimal256", - std::make_shared(l->value.get()), + std::make_shared(l->value.safeGet()), std::make_shared(static_cast(fuzz_rand() % 76))); } diff --git a/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp b/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp index 01aa7df48d3..6b9f6318903 100644 --- a/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp +++ b/src/Common/Scheduler/Nodes/DynamicResourceManager.cpp @@ -184,14 +184,20 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi // Resource update leads to loss of runtime data of nodes and may lead to temporary violation of constraints (e.g. limits) // Try to minimise this by reusing "equal" resources (initialized with the same configuration). + std::vector resources_to_attach; for (auto & [name, new_resource] : new_state->resources) { if (auto iter = state->resources.find(name); iter != state->resources.end()) // Resource update { State::ResourcePtr old_resource = iter->second; if (old_resource->equals(*new_resource)) + { new_resource = old_resource; // Rewrite with older version to avoid loss of runtime data + continue; + } } + // It is new or updated resource + resources_to_attach.emplace_back(new_resource); } // Commit new state @@ -199,17 +205,14 @@ void DynamicResourceManager::updateConfiguration(const Poco::Util::AbstractConfi state = new_state; // Attach new and updated resources to the scheduler - for (auto & [name, resource] : new_state->resources) + for (auto & resource : resources_to_attach) { const SchedulerNodePtr & root = resource->nodes.find("/")->second.ptr; - if (root->parent == nullptr) + resource->attached_to = &scheduler; + scheduler.event_queue->enqueue([this, root] { - resource->attached_to = &scheduler; - scheduler.event_queue->enqueue([this, root] - { - scheduler.attachChild(root); - }); - } + scheduler.attachChild(root); + }); } // NOTE: after mutex unlock `state` became available for Classifier(s) and must be immutable diff --git a/src/Common/ShellCommand.cpp b/src/Common/ShellCommand.cpp index 98a21b43d76..0d41669816c 100644 --- a/src/Common/ShellCommand.cpp +++ b/src/Common/ShellCommand.cpp @@ -237,7 +237,14 @@ std::unique_ptr ShellCommand::executeImpl( res->write_fds.emplace(fd, fds.fds_rw[1]); } - LOG_TRACE(getLogger(), "Started shell command '{}' with pid {}", filename, pid); + LOG_TRACE( + getLogger(), + "Started shell command '{}' with pid {} and file descriptors: out {}, err {}", + filename, + pid, + res->out.getFD(), + res->err.getFD()); + return res; } diff --git a/src/Common/SystemLogBase.cpp b/src/Common/SystemLogBase.cpp index 7d2c15714e2..127c8862a35 100644 --- a/src/Common/SystemLogBase.cpp +++ b/src/Common/SystemLogBase.cpp @@ -27,12 +27,14 @@ #include 
#include + namespace DB { namespace ErrorCodes { extern const int TIMEOUT_EXCEEDED; + extern const int ABORTED; } ISystemLog::~ISystemLog() = default; @@ -86,32 +88,18 @@ void SystemLogQueue::push(LogElement&& element) // by one, under exclusive lock, so we will see each message count. // It is enough to only wake the flushing thread once, after the message // count increases past half available size. - const uint64_t queue_end = queue_front_index + queue.size(); - requested_flush_up_to = std::max(requested_flush_up_to, queue_end); - flush_event.notify_all(); + const auto last_log_index = queue_front_index + queue.size(); + notifyFlushUnlocked(last_log_index, /* should_prepare_tables_anyway */ false); } if (queue.size() >= settings.max_size_rows) { + chassert(queue.size() == settings.max_size_rows); + // Ignore all further entries until the queue is flushed. - // Log a message about that. Don't spam it -- this might be especially - // problematic in case of trace log. Remember what the front index of the - // queue was when we last logged the message. If it changed, it means the - // queue was flushed, and we can log again. - if (queue_front_index != logged_queue_full_at_index) - { - logged_queue_full_at_index = queue_front_index; - - // TextLog sets its logger level to 0, so this log is a noop and - // there is no recursive logging. - lock.unlock(); - LOG_ERROR(log, "Queue is full for system log '{}' at {}. max_size_rows {}", - demangle(typeid(*this).name()), - queue_front_index, - settings.max_size_rows); - } - + // To the next batch we add a log message about how much we have lost + ++ignored_logs; return; } @@ -127,20 +115,50 @@ template void SystemLogQueue::handleCrash() { if (settings.notify_flush_on_crash) - notifyFlush(/* force */ true); + { + notifyFlush(getLastLogIndex(), /* should_prepare_tables_anyway */ true); + } } template -void SystemLogQueue::waitFlush(uint64_t expected_flushed_up_to) +void SystemLogQueue::notifyFlushUnlocked(Index expected_flushed_index, bool should_prepare_tables_anyway) { + if (should_prepare_tables_anyway) + requested_prepare_tables = std::max(requested_prepare_tables, expected_flushed_index); + + requested_flush_index = std::max(requested_flush_index, expected_flushed_index); + + flush_event.notify_all(); +} + +template +void SystemLogQueue::notifyFlush(SystemLogQueue::Index expected_flushed_index, bool should_prepare_tables_anyway) +{ + std::lock_guard lock(mutex); + notifyFlushUnlocked(expected_flushed_index, should_prepare_tables_anyway); +} + +template +void SystemLogQueue::waitFlush(SystemLogQueue::Index expected_flushed_index, bool should_prepare_tables_anyway) +{ + LOG_DEBUG(log, "Requested flush up to offset {}", expected_flushed_index); + // Use an arbitrary timeout to avoid endless waiting. 60s proved to be // too fast for our parallel functional tests, probably because they // heavily load the disk. 
const int timeout_seconds = 180; + std::unique_lock lock(mutex); - bool result = flush_event.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] + + // there is no obligation to call notifyFlush before waitFlush, than we have to be sure that flush_event has been triggered before we wait the result + notifyFlushUnlocked(expected_flushed_index, should_prepare_tables_anyway); + + auto result = confirm_event.wait_for(lock, std::chrono::seconds(timeout_seconds), [&] { - return flushed_up_to >= expected_flushed_up_to && !is_force_prepare_tables; + if (should_prepare_tables_anyway) + return (flushed_index >= expected_flushed_index && prepared_tables >= requested_prepare_tables) || is_shutdown; + else + return (flushed_index >= expected_flushed_index) || is_shutdown; }); if (!result) @@ -148,67 +166,63 @@ void SystemLogQueue::waitFlush(uint64_t expected_flushed_up_to) throw Exception(ErrorCodes::TIMEOUT_EXCEEDED, "Timeout exceeded ({} s) while flushing system log '{}'.", toString(timeout_seconds), demangle(typeid(*this).name())); } -} - -template -uint64_t SystemLogQueue::notifyFlush(bool should_prepare_tables_anyway) -{ - uint64_t this_thread_requested_offset; + if (is_shutdown) { - std::lock_guard lock(mutex); - if (is_shutdown) - return uint64_t(-1); - - this_thread_requested_offset = queue_front_index + queue.size(); - - // Publish our flush request, taking care not to overwrite the requests - // made by other threads. - is_force_prepare_tables |= should_prepare_tables_anyway; - requested_flush_up_to = std::max(requested_flush_up_to, this_thread_requested_offset); - - flush_event.notify_all(); + throw Exception(ErrorCodes::ABORTED, "Shutdown has been called while flushing system log '{}'. Aborting.", + demangle(typeid(*this).name())); } - - LOG_DEBUG(log, "Requested flush up to offset {}", this_thread_requested_offset); - return this_thread_requested_offset; } template -void SystemLogQueue::confirm(uint64_t to_flush_end) +SystemLogQueue::Index SystemLogQueue::getLastLogIndex() { std::lock_guard lock(mutex); - flushed_up_to = to_flush_end; - is_force_prepare_tables = false; - flush_event.notify_all(); + return queue_front_index + queue.size(); } template -typename SystemLogQueue::Index SystemLogQueue::pop(std::vector & output, - bool & should_prepare_tables_anyway, - bool & exit_this_thread) +void SystemLogQueue::confirm(SystemLogQueue::Index last_flashed_index) { - /// Call dtors and deallocate strings without holding the global lock - output.resize(0); + std::lock_guard lock(mutex); + prepared_tables = std::max(prepared_tables, last_flashed_index); + flushed_index = std::max(flushed_index, last_flashed_index); + confirm_event.notify_all(); +} - std::unique_lock lock(mutex); - flush_event.wait_for(lock, - std::chrono::milliseconds(settings.flush_interval_milliseconds), - [&] () +template +typename SystemLogQueue::PopResult SystemLogQueue::pop() +{ + PopResult result; + size_t prev_ignored_logs = 0; + + { + std::unique_lock lock(mutex); + + flush_event.wait_for(lock, std::chrono::milliseconds(settings.flush_interval_milliseconds), [&] () { - return requested_flush_up_to > flushed_up_to || is_shutdown || is_force_prepare_tables; - } - ); + return requested_flush_index > flushed_index || requested_prepare_tables > prepared_tables || is_shutdown; + }); - queue_front_index += queue.size(); - // Swap with existing array from previous flush, to save memory - // allocations. 
- queue.swap(output); + if (is_shutdown) + return PopResult{.is_shutdown = true}; - should_prepare_tables_anyway = is_force_prepare_tables; + queue_front_index += queue.size(); + prev_ignored_logs = ignored_logs; + ignored_logs = 0; - exit_this_thread = is_shutdown; - return queue_front_index; + result.last_log_index = queue_front_index; + result.logs.swap(queue); + result.create_table_force = requested_prepare_tables > prepared_tables; + } + + if (prev_ignored_logs) + LOG_ERROR(log, "Queue had been full at {}, accepted {} logs, ignored {} logs.", + result.last_log_index - result.logs.size(), + result.logs.size(), + prev_ignored_logs); + + return result; } template @@ -229,13 +243,21 @@ SystemLogBase::SystemLogBase( } template -void SystemLogBase::flush(bool force) +SystemLogBase::Index SystemLogBase::getLastLogIndex() { - uint64_t this_thread_requested_offset = queue->notifyFlush(force); - if (this_thread_requested_offset == uint64_t(-1)) - return; + return queue->getLastLogIndex(); +} - queue->waitFlush(this_thread_requested_offset); +template +void SystemLogBase::notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway) +{ + queue->notifyFlush(expected_flushed_index, should_prepare_tables_anyway); +} + +template +void SystemLogBase::flush(Index expected_flushed_index, bool should_prepare_tables_anyway) +{ + queue->waitFlush(expected_flushed_index, should_prepare_tables_anyway); } template @@ -257,9 +279,6 @@ void SystemLogBase::add(LogElement element) queue->push(std::move(element)); } -template -void SystemLogBase::notifyFlush(bool force) { queue->notifyFlush(force); } - #define INSTANTIATE_SYSTEM_LOG_BASE(ELEMENT) template class SystemLogBase; SYSTEM_LOG_ELEMENTS(INSTANTIATE_SYSTEM_LOG_BASE) diff --git a/src/Common/SystemLogBase.h b/src/Common/SystemLogBase.h index b87fcf419d3..0d7b04d5c57 100644 --- a/src/Common/SystemLogBase.h +++ b/src/Common/SystemLogBase.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include #include @@ -54,10 +55,19 @@ struct StorageID; class ISystemLog { public: + using Index = int64_t; + virtual String getName() const = 0; - //// force -- force table creation (used for SYSTEM FLUSH LOGS) - virtual void flush(bool force = false) = 0; /// NOLINT + /// Return the index of the latest added log element. That index is never less than the flushed index. + /// The flushed index is the index of the last log element which has been flushed successfully. + /// Hence all the records whose index is less than the flushed index have been flushed already. + virtual Index getLastLogIndex() = 0; + /// Call this method to wake up the flush thread and flush the data in the background. It is a non-blocking call. + virtual void notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway) = 0; + /// Call this method to wait until the logs are flushed up to expected_flushed_index. It is a blocking call. + virtual void flush(Index expected_flushed_index, bool should_prepare_tables_anyway) = 0; + virtual void prepareTable() = 0; /// Start the background thread.
@@ -97,26 +107,38 @@ struct SystemLogQueueSettings template class SystemLogQueue { - using Index = uint64_t; - public: + using Index = ISystemLog::Index; + explicit SystemLogQueue(const SystemLogQueueSettings & settings_); void shutdown(); // producer methods void push(LogElement && element); - Index notifyFlush(bool should_prepare_tables_anyway); - void waitFlush(Index expected_flushed_up_to); + + Index getLastLogIndex(); + void notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway); + void waitFlush(Index expected_flushed_index, bool should_prepare_tables_anyway); /// Handles crash, flushes log without blocking if notify_flush_on_crash is set void handleCrash(); + struct PopResult + { + Index last_log_index = 0; + std::vector logs = {}; + bool create_table_force = false; + bool is_shutdown = false; + }; + // consumer methods - Index pop(std::vector& output, bool & should_prepare_tables_anyway, bool & exit_this_thread); - void confirm(Index to_flush_end); + PopResult pop(); + void confirm(Index last_flashed_index); private: + void notifyFlushUnlocked(Index expected_flushed_index, bool should_prepare_tables_anyway); + /// Data shared between callers of add()/flush()/shutdown(), and the saving thread std::mutex mutex; @@ -124,22 +146,32 @@ private: // Queue is bounded. But its size is quite large to not block in all normal cases. std::vector queue; + // An always-incrementing index of the first message currently in the queue. // We use it to give a global sequential index to every message, so that we // can wait until a particular message is flushed. This is used to implement // synchronous log flushing for SYSTEM FLUSH LOGS. Index queue_front_index = 0; - // A flag that says we must create the tables even if the queue is empty. - bool is_force_prepare_tables = false; + // Requested to flush logs up to this index, exclusive - Index requested_flush_up_to = 0; + Index requested_flush_index = std::numeric_limits::min(); // Flushed log up to this index, exclusive - Index flushed_up_to = 0; - // Logged overflow message at this queue front index - Index logged_queue_full_at_index = -1; + Index flushed_index = 0; + + // The same logic applies to preparing tables: if requested_prepare_tables > prepared_tables we need to do the preparation, + // except that the initial prepared_tables is -1. + // The difference matters when no logs have been written yet and a flush is requested: + // the state becomes requested_flush_index = 0 and flushed_index = 0 -- nothing needs to be done, + // but if the tables must be prepared it becomes requested_prepare_tables = 0 and prepared_tables = -1, + // so we trigger the background thread and do the preparation. + Index requested_prepare_tables = std::numeric_limits::min(); + Index prepared_tables = -1; + + size_t ignored_logs = 0; bool is_shutdown = false; + std::condition_variable confirm_event; std::condition_variable flush_event; const SystemLogQueueSettings settings; @@ -150,6 +182,7 @@ template class SystemLogBase : public ISystemLog { public: + using Index = ISystemLog::Index; using Self = SystemLogBase; explicit SystemLogBase( @@ -163,15 +196,16 @@ public: */ void add(LogElement element); + Index getLastLogIndex() override; + + void notifyFlush(Index expected_flushed_index, bool should_prepare_tables_anyway) override; + /// Flush data in the buffer to disk. Block the thread until the data is stored on disk.
- void flush(bool force) override; + void flush(Index expected_flushed_index, bool should_prepare_tables_anyway) override; /// Handles crash, flushes log without blocking if notify_flush_on_crash is set void handleCrash() override; - /// Non-blocking flush data in the buffer to disk. - void notifyFlush(bool force); - String getName() const override { return LogElement::name(); } static const char * getDefaultOrderBy() { return "event_date, event_time"; } diff --git a/src/Common/config.h.in b/src/Common/config.h.in index 6a0090130a3..2e3b8d84366 100644 --- a/src/Common/config.h.in +++ b/src/Common/config.h.in @@ -32,6 +32,8 @@ #cmakedefine01 USE_IDNA #cmakedefine01 USE_NLP #cmakedefine01 USE_VECTORSCAN +#cmakedefine01 USE_QPL +#cmakedefine01 USE_QATLIB #cmakedefine01 USE_LIBURING #cmakedefine01 USE_AVRO #cmakedefine01 USE_CAPNP @@ -56,10 +58,11 @@ #cmakedefine01 USE_FILELOG #cmakedefine01 USE_ODBC #cmakedefine01 USE_BLAKE3 +#cmakedefine01 USE_USEARCH #cmakedefine01 USE_SKIM #cmakedefine01 USE_PRQL #cmakedefine01 USE_ULID -#cmakedefine01 FIU_ENABLE +#cmakedefine01 USE_LIBFIU #cmakedefine01 USE_BCRYPT #cmakedefine01 USE_LIBARCHIVE #cmakedefine01 USE_POCKETFFT diff --git a/src/Common/examples/arena_with_free_lists.cpp b/src/Common/examples/arena_with_free_lists.cpp index 6793d567aca..3a1304e2d94 100644 --- a/src/Common/examples/arena_with_free_lists.cpp +++ b/src/Common/examples/arena_with_free_lists.cpp @@ -174,19 +174,19 @@ struct Dictionary { switch (attribute.type) { - case AttributeUnderlyingTypeTest::UInt8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::UInt16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::UInt32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; - case AttributeUnderlyingTypeTest::UInt64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int8: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int16: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Int32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; - case AttributeUnderlyingTypeTest::Int64: std::get>(attribute.arrays)[idx] = value.get(); break; - case AttributeUnderlyingTypeTest::Float32: std::get>(attribute.arrays)[idx] = static_cast(value.get()); break; - case AttributeUnderlyingTypeTest::Float64: std::get>(attribute.arrays)[idx] = value.get(); break; + case AttributeUnderlyingTypeTest::UInt8: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::UInt16: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::UInt32: std::get>(attribute.arrays)[idx] = static_cast(value.safeGet()); break; + case AttributeUnderlyingTypeTest::UInt64: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Int8: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Int16: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Int32: std::get>(attribute.arrays)[idx] = static_cast(value.safeGet()); break; + case AttributeUnderlyingTypeTest::Int64: std::get>(attribute.arrays)[idx] = value.safeGet(); break; + case AttributeUnderlyingTypeTest::Float32: std::get>(attribute.arrays)[idx] = static_cast(value.safeGet()); break; + case AttributeUnderlyingTypeTest::Float64: 
std::get>(attribute.arrays)[idx] = value.safeGet(); break; case AttributeUnderlyingTypeTest::String: { - const auto & string = value.get(); + const auto & string = value.safeGet(); auto & string_ref = std::get>(attribute.arrays)[idx]; const auto & null_value_ref = std::get(attribute.null_values); diff --git a/src/Compression/CompressionCodecDeflateQpl.cpp b/src/Compression/CompressionCodecDeflateQpl.cpp index f1b5b24e866..c82ee861a6f 100644 --- a/src/Compression/CompressionCodecDeflateQpl.cpp +++ b/src/Compression/CompressionCodecDeflateQpl.cpp @@ -1,7 +1,3 @@ -#ifdef ENABLE_QPL_COMPRESSION - -#include -#include #include #include #include @@ -11,6 +7,10 @@ #include #include #include +#include +#include + +#if USE_QPL #include "libaccel_config.h" diff --git a/src/Compression/CompressionCodecDeflateQpl.h b/src/Compression/CompressionCodecDeflateQpl.h index 86fd9051bd8..d9abc0fb7e0 100644 --- a/src/Compression/CompressionCodecDeflateQpl.h +++ b/src/Compression/CompressionCodecDeflateQpl.h @@ -4,6 +4,11 @@ #include #include #include + +#include "config.h" + +#if USE_QPL + #include namespace Poco @@ -117,3 +122,4 @@ private: }; } +#endif diff --git a/src/Compression/CompressionCodecZSTDQAT.cpp b/src/Compression/CompressionCodecZSTDQAT.cpp index 5a4ef70a30a..e19b7e4a001 100644 --- a/src/Compression/CompressionCodecZSTDQAT.cpp +++ b/src/Compression/CompressionCodecZSTDQAT.cpp @@ -1,4 +1,6 @@ -#ifdef ENABLE_ZSTD_QAT_CODEC +#include "config.h" + +#if USE_QATLIB #include #include @@ -6,6 +8,7 @@ #include #include #include + #include #include diff --git a/src/Compression/CompressionFactory.cpp b/src/Compression/CompressionFactory.cpp index 2e7aa0d086f..ac00f571568 100644 --- a/src/Compression/CompressionFactory.cpp +++ b/src/Compression/CompressionFactory.cpp @@ -1,20 +1,20 @@ -#include "config.h" - #include +#include +#include +#include +#include #include #include #include -#include -#include -#include -#include #include -#include -#include -#include +#include +#include +#include #include +#include "config.h" + namespace DB { @@ -175,11 +175,11 @@ void registerCodecNone(CompressionCodecFactory & factory); void registerCodecLZ4(CompressionCodecFactory & factory); void registerCodecLZ4HC(CompressionCodecFactory & factory); void registerCodecZSTD(CompressionCodecFactory & factory); -#ifdef ENABLE_ZSTD_QAT_CODEC +#if USE_QATLIB void registerCodecZSTDQAT(CompressionCodecFactory & factory); #endif void registerCodecMultiple(CompressionCodecFactory & factory); -#ifdef ENABLE_QPL_COMPRESSION +#if USE_QPL void registerCodecDeflateQpl(CompressionCodecFactory & factory); #endif @@ -198,7 +198,7 @@ CompressionCodecFactory::CompressionCodecFactory() registerCodecNone(*this); registerCodecLZ4(*this); registerCodecZSTD(*this); -#ifdef ENABLE_ZSTD_QAT_CODEC +#if USE_QATLIB registerCodecZSTDQAT(*this); #endif registerCodecLZ4HC(*this); @@ -209,7 +209,7 @@ CompressionCodecFactory::CompressionCodecFactory() registerCodecGorilla(*this); registerCodecEncrypted(*this); registerCodecFPC(*this); -#ifdef ENABLE_QPL_COMPRESSION +#if USE_QPL registerCodecDeflateQpl(*this); #endif registerCodecGCD(*this); diff --git a/src/Core/ExternalTable.cpp b/src/Core/ExternalTable.cpp index af681cd5639..c2bcf6ec651 100644 --- a/src/Core/ExternalTable.cpp +++ b/src/Core/ExternalTable.cpp @@ -49,7 +49,7 @@ ExternalTableDataPtr BaseExternalTable::getData(ContextPtr context) { initReadBuffer(); initSampleBlock(); - auto input = context->getInputFormat(format, *read_buffer, sample_block, context->getSettingsRef().get("max_block_size").get()); 
+ auto input = context->getInputFormat(format, *read_buffer, sample_block, context->getSettingsRef().get("max_block_size").safeGet()); auto data = std::make_unique(); data->pipe = std::make_unique(); diff --git a/src/Core/Field.h b/src/Core/Field.h index f1bb4a72b0d..ba8c66580ad 100644 --- a/src/Core/Field.h +++ b/src/Core/Field.h @@ -457,15 +457,6 @@ public: std::string_view getTypeName() const; bool isNull() const { return which == Types::Null; } - template - NearestFieldType> & get(); - - template - const auto & get() const - { - auto * mutable_this = const_cast *>(this); - return mutable_this->get(); - } bool isNegativeInfinity() const { return which == Types::Null && get().isNegativeInfinity(); } bool isPositiveInfinity() const { return which == Types::Null && get().isPositiveInfinity(); } @@ -681,6 +672,25 @@ private: Types::Which which; + /// This function is prone to type punning and should never be used outside of Field class, + /// whenever it is used within this class the stored type should be checked in advance. + template + NearestFieldType> & get() + { + // Before storing the value in the Field, we static_cast it to the field + // storage type, so here we return the value of storage type as well. + // Otherwise, it is easy to make a mistake of reinterpret_casting the stored + // value to a different and incompatible type. + // For example, a Float32 value is stored as Float64, and it is incorrect to + // return a reference to this value as Float32. + return *reinterpret_cast>*>(&storage); + } + + template + NearestFieldType> & get() const + { + return const_cast(this)->get(); + } /// Assuming there was no allocated state or it was deallocated (see destroy). template @@ -866,48 +876,21 @@ constexpr bool isInt64OrUInt64orBoolFieldType(Field::Types::Which t) || t == Field::Types::Bool; } -// Field value getter with type checking in debug builds. -template -NearestFieldType> & Field::get() -{ - // Before storing the value in the Field, we static_cast it to the field - // storage type, so here we return the value of storage type as well. - // Otherwise, it is easy to make a mistake of reinterpret_casting the stored - // value to a different and incompatible type. - // For example, a Float32 value is stored as Float64, and it is incorrect to - // return a reference to this value as Float32. - using StoredType = NearestFieldType>; - -#ifndef NDEBUG - // Disregard signedness when converting between int64 types. - constexpr Field::Types::Which target = TypeToEnum::value; - if (target != which - && (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which)) && target != Field::Types::IPv4) - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Invalid Field get from type {} to type {}", which, target); -#endif - - StoredType * MAY_ALIAS ptr = reinterpret_cast(&storage); - - return *ptr; -} - - template auto & Field::safeGet() { const Types::Which target = TypeToEnum>>::value; - /// We allow converting int64 <-> uint64, int64 <-> bool, uint64 <-> bool in safeGet(). 
- if (target != which - && (!isInt64OrUInt64orBoolFieldType(target) || !isInt64OrUInt64orBoolFieldType(which))) - throw Exception(ErrorCodes::BAD_GET, - "Bad get: has {}, requested {}", getTypeName(), target); + /// bool is stored as uint64, will be returned as UInt64 when requested as bool or UInt64, as Int64 when requested as Int64 + /// also allow UInt64 <-> Int64 conversion + if (target != which && + !(which == Field::Types::Bool && (target == Field::Types::UInt64 || target == Field::Types::Int64)) && + !(isInt64OrUInt64FieldType(which) && isInt64OrUInt64FieldType(target))) + throw Exception(ErrorCodes::BAD_GET, "Bad get: has {}, requested {}", getTypeName(), target); return get(); } - template requires not_field_or_bool_or_stringlike Field::Field(T && rhs) diff --git a/src/Core/ProtocolDefines.h b/src/Core/ProtocolDefines.h index 02d54221ed3..790987272fa 100644 --- a/src/Core/ProtocolDefines.h +++ b/src/Core/ProtocolDefines.h @@ -83,6 +83,9 @@ static constexpr auto DBMS_MIN_REVISION_WITH_SYSTEM_KEYWORDS_TABLE = 54468; static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469; +/// Packets size header +static constexpr auto DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS = 54470; + /// Version of ClickHouse TCP protocol. /// /// Should be incremented manually on protocol changes. @@ -90,6 +93,6 @@ static constexpr auto DBMS_MIN_REVISION_WITH_ROWS_BEFORE_AGGREGATION = 54469; /// NOTE: DBMS_TCP_PROTOCOL_VERSION has nothing common with VERSION_REVISION, /// later is just a number for server version (one number instead of commit SHA) /// for simplicity (sometimes it may be more convenient in some use cases). -static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54469; +static constexpr auto DBMS_TCP_PROTOCOL_VERSION = 54470; } diff --git a/src/Core/Range.cpp b/src/Core/Range.cpp index 956b96653a1..1a5ce1e012e 100644 --- a/src/Core/Range.cpp +++ b/src/Core/Range.cpp @@ -62,27 +62,27 @@ void Range::shrinkToIncludedIfPossible() { if (left.isExplicit() && !left_included) { - if (left.getType() == Field::Types::UInt64 && left.get() != std::numeric_limits::max()) + if (left.getType() == Field::Types::UInt64 && left.safeGet() != std::numeric_limits::max()) { - ++left.get(); + ++left.safeGet(); left_included = true; } - if (left.getType() == Field::Types::Int64 && left.get() != std::numeric_limits::max()) + if (left.getType() == Field::Types::Int64 && left.safeGet() != std::numeric_limits::max()) { - ++left.get(); + ++left.safeGet(); left_included = true; } } if (right.isExplicit() && !right_included) { - if (right.getType() == Field::Types::UInt64 && right.get() != std::numeric_limits::min()) + if (right.getType() == Field::Types::UInt64 && right.safeGet() != std::numeric_limits::min()) { - --right.get(); + --right.safeGet(); right_included = true; } - if (right.getType() == Field::Types::Int64 && right.get() != std::numeric_limits::min()) + if (right.getType() == Field::Types::Int64 && right.safeGet() != std::numeric_limits::min()) { - --right.get(); + --right.safeGet(); right_included = true; } } diff --git a/src/Core/ServerSettings.h b/src/Core/ServerSettings.h index d13e6251ca9..256f78ddea0 100644 --- a/src/Core/ServerSettings.h +++ b/src/Core/ServerSettings.h @@ -134,6 +134,7 @@ namespace DB M(Bool, async_load_databases, false, "Enable asynchronous loading of databases and tables to speedup server startup. 
Queries to not yet loaded entity will be blocked until load is finished.", 0) \ M(Bool, display_secrets_in_show_and_select, false, "Allow showing secrets in SHOW and SELECT queries via a format setting and a grant", 0) \ M(Seconds, keep_alive_timeout, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT, "The number of seconds that ClickHouse waits for incoming requests before closing the connection.", 0) \ + M(UInt64, max_keep_alive_requests, 10000, "The maximum number of requests handled via a single http keepalive connection before the server closes this connection.", 0) \ M(Seconds, replicated_fetches_http_connection_timeout, 0, "HTTP connection timeout for part fetch requests. Inherited from default profile `http_connection_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_send_timeout, 0, "HTTP send timeout for part fetch requests. Inherited from default profile `http_send_timeout` if not set explicitly.", 0) \ M(Seconds, replicated_fetches_http_receive_timeout, 0, "HTTP receive timeout for fetch part requests. Inherited from default profile `http_receive_timeout` if not set explicitly.", 0) \ diff --git a/src/Core/Settings.cpp b/src/Core/Settings.cpp index 9c9c9c1db00..45bd2b9eb42 100644 --- a/src/Core/Settings.cpp +++ b/src/Core/Settings.cpp @@ -118,7 +118,7 @@ void Settings::set(std::string_view name, const Field & value) { if (value.getType() != Field::Types::Which::String) throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unexpected type of value for setting 'compatibility'. Expected String, got {}", value.getTypeName()); - applyCompatibilitySetting(value.get()); + applyCompatibilitySetting(value.safeGet()); } /// If we change setting that was changed by compatibility setting before /// we should remove it from settings_changed_by_compatibility_setting, diff --git a/src/Core/Settings.h b/src/Core/Settings.h index 817d4192e96..6f253972351 100644 --- a/src/Core/Settings.h +++ b/src/Core/Settings.h @@ -593,7 +593,6 @@ class IColumn; M(UInt64, mutations_sync, 0, "Wait for synchronous execution of ALTER TABLE UPDATE/DELETE queries (mutations). 0 - execute asynchronously. 1 - wait current server. 2 - wait all replicas if they exist.", 0) \ M(Bool, enable_lightweight_delete, true, "Enable lightweight DELETE mutations for mergetree tables.", 0) ALIAS(allow_experimental_lightweight_delete) \ M(UInt64, lightweight_deletes_sync, 2, "The same as 'mutation_sync', but controls only execution of lightweight deletes", 0) \ - M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop all projection related to this table then do lightweight delete.", 0) \ M(Bool, apply_deleted_mask, true, "Enables filtering out rows deleted with lightweight DELETE. If disabled, a query will be able to read those rows. This is useful for debugging and \"undelete\" scenarios", 0) \ M(Bool, optimize_normalize_count_variants, true, "Rewrite aggregate functions that semantically equals to count() as count().", 0) \ M(Bool, optimize_injective_functions_inside_uniq, true, "Delete injective functions of one argument inside uniq*() functions.", 0) \ @@ -605,7 +604,7 @@ class IColumn; M(Bool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. 
Currently it's not beneficial for numeric types.", 0) \ M(Bool, optimize_multiif_to_if, true, "Replace 'multiIf' with only one condition to 'if'.", 0) \ M(Bool, optimize_if_transform_strings_to_enum, false, "Replaces string-type arguments in If and Transform to enum. Disabled by default cause it could make inconsistent change in distributed query that would lead to its fail.", 0) \ - M(Bool, optimize_functions_to_subcolumns, false, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ + M(Bool, optimize_functions_to_subcolumns, true, "Transform functions to subcolumns, if possible, to reduce amount of read data. E.g. 'length(arr)' -> 'arr.size0', 'col IS NULL' -> 'col.null' ", 0) \ M(Bool, optimize_using_constraints, false, "Use constraints for query optimization", 0) \ M(Bool, optimize_substitute_columns, false, "Use constraints for column substitution", 0) \ M(Bool, optimize_append_index, false, "Use constraints in order to append index condition (indexHint)", 0) \ @@ -676,6 +675,7 @@ class IColumn; M(Bool, query_cache_squash_partial_results, true, "Squash partial result blocks to blocks of size 'max_block_size'. Reduces performance of inserts into the query cache but improves the compressability of cache entries.", 0) \ M(Seconds, query_cache_ttl, 60, "After this time in seconds entries in the query cache become stale", 0) \ M(Bool, query_cache_share_between_users, false, "Allow other users to read entry in the query cache", 0) \ + M(String, query_cache_tag, "", "A string which acts as a label for query cache entries. The same queries with different tags are considered different by the query cache.", 0) \ M(Bool, enable_sharing_sets_for_mutations, true, "Allow sharing set objects build for IN subqueries between different tasks of the same mutation. This reduces memory usage and CPU consumption", 0) \ \ M(Bool, optimize_rewrite_sum_if_to_count_if, true, "Rewrite sumIf() and sum(if()) function countIf() function when logically equivalent", 0) \ @@ -907,20 +907,18 @@ class IColumn; M(Bool, allow_experimental_hash_functions, false, "Enable experimental hash functions", 0) \ M(Bool, allow_experimental_object_type, false, "Allow Object and JSON data types", 0) \ M(Bool, allow_experimental_time_series_table, false, "Allows experimental TimeSeries table engine", 0) \ + M(Bool, allow_experimental_vector_similarity_index, false, "Allow experimental vector similarity index", 0) \ M(Bool, allow_experimental_variant_type, false, "Allow Variant data type", 0) \ M(Bool, allow_experimental_dynamic_type, false, "Allow Dynamic data type", 0) \ - M(Bool, allow_experimental_annoy_index, false, "Allows to use Annoy index. Disabled by default because this feature is experimental", 0) \ - M(Bool, allow_experimental_usearch_index, false, "Allows to use USearch index. Disabled by default because this feature is experimental", 0) \ M(Bool, allow_experimental_codecs, false, "If it is set to true, allow to specify experimental compression codecs (but we don't have those yet and this option does nothing).", 0) \ M(UInt64, max_limit_for_ann_queries, 1'000'000, "SELECT queries with LIMIT bigger than this setting cannot use ANN indexes. 
Helps to prevent memory overflows in ANN search indexes.", 0) \ - M(UInt64, max_threads_for_annoy_index_creation, 4, "Number of threads used to build Annoy indexes (0 means all cores, not recommended)", 0) \ - M(Int64, annoy_index_search_k_nodes, -1, "SELECT queries search up to this many nodes in Annoy indexes.", 0) \ M(Bool, throw_on_unsupported_query_inside_transaction, true, "Throw exception if unsupported query is used inside transaction", 0) \ M(TransactionsWaitCSNMode, wait_changes_become_visible_after_commit_mode, TransactionsWaitCSNMode::WAIT_UNKNOWN, "Wait for committed changes to become actually visible in the latest snapshot", 0) \ M(Bool, implicit_transaction, false, "If enabled and not already inside a transaction, wraps the query inside a full transaction (begin + commit or rollback)", 0) \ M(UInt64, grace_hash_join_initial_buckets, 1, "Initial number of grace hash join buckets", 0) \ M(UInt64, grace_hash_join_max_buckets, 1024, "Limit on the number of grace hash join buckets", 0) \ M(Timezone, session_timezone, "", "This setting can be removed in the future due to potential caveats. It is experimental and is not suitable for production usage. The default timezone for current session or query. The server default timezone if empty.", 0) \ + M(Bool, use_hive_partitioning, false, "Allows to use hive partitioning for File, URL, S3, AzureBlobStorage and HDFS engines.", 0)\ \ M(Bool, allow_statistics_optimize, false, "Allows using statistics to optimize queries", 0) ALIAS(allow_statistic_optimize) \ M(Bool, allow_experimental_statistics, false, "Allows using statistics", 0) ALIAS(allow_experimental_statistic) \ @@ -1036,6 +1034,10 @@ class IColumn; MAKE_OBSOLETE(M, UInt64, parallel_replicas_min_number_of_granules_to_enable, 0) \ MAKE_OBSOLETE(M, Bool, query_plan_optimize_projection, true) \ MAKE_OBSOLETE(M, Bool, query_cache_store_results_of_queries_with_nondeterministic_functions, false) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_annoy_index, false) \ + MAKE_OBSOLETE(M, UInt64, max_threads_for_annoy_index_creation, 4) \ + MAKE_OBSOLETE(M, Int64, annoy_index_search_k_nodes, -1) \ + MAKE_OBSOLETE(M, Bool, allow_experimental_usearch_index, false) \ MAKE_OBSOLETE(M, Bool, optimize_move_functions_out_of_any, false) \ MAKE_OBSOLETE(M, Bool, allow_experimental_undrop_table_query, true) \ MAKE_OBSOLETE(M, Bool, allow_experimental_s3queue, true) \ diff --git a/src/Core/SettingsChangesHistory.cpp b/src/Core/SettingsChangesHistory.cpp index e4662bc5244..c2314450546 100644 --- a/src/Core/SettingsChangesHistory.cpp +++ b/src/Core/SettingsChangesHistory.cpp @@ -71,6 +71,7 @@ static std::initializer_list(f.get()); + return stringToNumber(f.safeGet()); } else if (f.getType() == Field::Types::UInt64) { T result; - if (!accurate::convertNumeric(f.get(), result)) + if (!accurate::convertNumeric(f.safeGet(), result)) throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Field value {} is out of range of {} type", f, demangle(typeid(T).name())); return result; } else if (f.getType() == Field::Types::Int64) { T result; - if (!accurate::convertNumeric(f.get(), result)) + if (!accurate::convertNumeric(f.safeGet(), result)) throw Exception(ErrorCodes::CANNOT_CONVERT_TYPE, "Field value {} is out of range of {} type", f, demangle(typeid(T).name())); return result; } else if (f.getType() == Field::Types::Bool) { - return T(f.get()); + return T(f.safeGet()); } else if (f.getType() == Field::Types::Float64) { - Float64 x = f.get(); + Float64 x = f.safeGet(); if constexpr (std::is_floating_point_v) { return 
T(x); @@ -120,7 +120,7 @@ namespace if (f.getType() == Field::Types::String) { /// Allow to parse Map from string field. For the convenience. - const auto & str = f.get(); + const auto & str = f.safeGet(); return stringToMap(str); } @@ -218,7 +218,7 @@ namespace UInt64 fieldToMaxThreads(const Field & f) { if (f.getType() == Field::Types::String) - return stringToMaxThreads(f.get()); + return stringToMaxThreads(f.safeGet()); else return fieldToNumber(f); } diff --git a/src/Core/SettingsQuirks.cpp b/src/Core/SettingsQuirks.cpp index 5541cc19653..3127a5ef36d 100644 --- a/src/Core/SettingsQuirks.cpp +++ b/src/Core/SettingsQuirks.cpp @@ -100,7 +100,7 @@ void doSettingsSanityCheckClamp(Settings & current_settings, LoggerPtr log) return current_value; }; - UInt64 max_threads = get_current_value("max_threads").get(); + UInt64 max_threads = get_current_value("max_threads").safeGet(); UInt64 max_threads_max_value = 256 * getNumberOfPhysicalCPUCores(); if (max_threads > max_threads_max_value) { @@ -120,7 +120,7 @@ void doSettingsSanityCheckClamp(Settings & current_settings, LoggerPtr log) "input_format_parquet_max_block_size"}; for (auto const & setting : block_rows_settings) { - if (auto block_size = get_current_value(setting).get(); + if (auto block_size = get_current_value(setting).safeGet(); block_size > max_sane_block_rows_size) { if (log) @@ -129,7 +129,7 @@ void doSettingsSanityCheckClamp(Settings & current_settings, LoggerPtr log) } } - if (auto max_block_size = get_current_value("max_block_size").get(); max_block_size == 0) + if (auto max_block_size = get_current_value("max_block_size").safeGet(); max_block_size == 0) { if (log) LOG_WARNING(log, "Sanity check: 'max_block_size' cannot be 0. Set to default value {}", DEFAULT_BLOCK_SIZE); diff --git a/src/Core/examples/field.cpp b/src/Core/examples/field.cpp index 110e11d0cb1..3064290e127 100644 --- a/src/Core/examples/field.cpp +++ b/src/Core/examples/field.cpp @@ -37,7 +37,7 @@ int main(int argc, char ** argv) std::cerr << applyVisitor(to_string, field) << std::endl; } - field.get().push_back(field); + field.safeGet().push_back(field); std::cerr << applyVisitor(to_string, field) << std::endl; std::cerr << (field < field2) << std::endl; diff --git a/src/Core/fuzzers/CMakeLists.txt b/src/Core/fuzzers/CMakeLists.txt index 3c5c0eed4e6..61d6b9629eb 100644 --- a/src/Core/fuzzers/CMakeLists.txt +++ b/src/Core/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable (names_and_types_fuzzer names_and_types_fuzzer.cpp) -target_link_libraries (names_and_types_fuzzer PRIVATE dbms clickhouse_functions) +target_link_libraries (names_and_types_fuzzer PRIVATE clickhouse_functions) diff --git a/src/Core/tests/gtest_field.cpp b/src/Core/tests/gtest_field.cpp index 5585442d835..7e778be9575 100644 --- a/src/Core/tests/gtest_field.cpp +++ b/src/Core/tests/gtest_field.cpp @@ -8,31 +8,31 @@ GTEST_TEST(Field, FromBool) { Field f{false}; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 0); - ASSERT_EQ(f.get(), false); + ASSERT_EQ(f.safeGet(), 0); + ASSERT_EQ(f.safeGet(), false); } { Field f{true}; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 1); - ASSERT_EQ(f.get(), true); + ASSERT_EQ(f.safeGet(), 1); + ASSERT_EQ(f.safeGet(), true); } { Field f; f = false; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 0); - ASSERT_EQ(f.get(), false); + ASSERT_EQ(f.safeGet(), 0); + ASSERT_EQ(f.safeGet(), false); } { Field f; f = true; ASSERT_EQ(f.getType(), Field::Types::Bool); - ASSERT_EQ(f.get(), 1); - ASSERT_EQ(f.get(), 
true); + ASSERT_EQ(f.safeGet(), 1); + ASSERT_EQ(f.safeGet(), true); } } @@ -42,15 +42,15 @@ GTEST_TEST(Field, Move) Field f; f = Field{String{"Hello, world (1)"}}; - ASSERT_EQ(f.get(), "Hello, world (1)"); + ASSERT_EQ(f.safeGet(), "Hello, world (1)"); f = Field{String{"Hello, world (2)"}}; - ASSERT_EQ(f.get(), "Hello, world (2)"); + ASSERT_EQ(f.safeGet(), "Hello, world (2)"); f = Field{Array{Field{String{"Hello, world (3)"}}}}; - ASSERT_EQ(f.get()[0].get(), "Hello, world (3)"); + ASSERT_EQ(f.safeGet()[0].safeGet(), "Hello, world (3)"); f = String{"Hello, world (4)"}; - ASSERT_EQ(f.get(), "Hello, world (4)"); + ASSERT_EQ(f.safeGet(), "Hello, world (4)"); f = Array{Field{String{"Hello, world (5)"}}}; - ASSERT_EQ(f.get()[0].get(), "Hello, world (5)"); + ASSERT_EQ(f.safeGet()[0].safeGet(), "Hello, world (5)"); f = Array{String{"Hello, world (6)"}}; - ASSERT_EQ(f.get()[0].get(), "Hello, world (6)"); + ASSERT_EQ(f.safeGet()[0].safeGet(), "Hello, world (6)"); } diff --git a/src/DataTypes/DataTypeAggregateFunction.cpp b/src/DataTypes/DataTypeAggregateFunction.cpp index a4cd3b9e511..1facaaab0d6 100644 --- a/src/DataTypes/DataTypeAggregateFunction.cpp +++ b/src/DataTypes/DataTypeAggregateFunction.cpp @@ -129,7 +129,7 @@ MutableColumnPtr DataTypeAggregateFunction::createColumn() const Field DataTypeAggregateFunction::getDefault() const { Field field = AggregateFunctionStateData(); - field.get().name = getName(); + field.safeGet().name = getName(); AlignedBuffer place_buffer(function->sizeOfData(), function->alignOfData()); AggregateDataPtr place = place_buffer.data(); @@ -138,7 +138,7 @@ Field DataTypeAggregateFunction::getDefault() const try { - WriteBufferFromString buffer_from_field(field.get().data); + WriteBufferFromString buffer_from_field(field.safeGet().data); function->serialize(place, buffer_from_field, version); } catch (...) 
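The reworked Field::get()/safeGet() above reduces to a small set of allowed conversions: Bool may be read as UInt64 or Int64, UInt64 and Int64 are interchangeable, and anything else must match the stored type exactly or BAD_GET is thrown. A minimal usage sketch, not part of the patch, assuming the ClickHouse Field header is available; demoSafeGet is a hypothetical function name:

    #include <Core/Field.h>

    using namespace DB;

    void demoSafeGet()
    {
        Field b = true;                  // stored as Types::Bool (backed by UInt64)
        UInt64 u = b.safeGet<UInt64>();  // allowed: Bool read as UInt64, u == 1
        Int64 i = b.safeGet<Int64>();    // allowed: Bool read as Int64, i == 1

        Field n = UInt64(42);
        Int64 s = n.safeGet<Int64>();    // allowed: UInt64 <-> Int64 conversion

        Field f = 3.14;                  // stored as Types::Float64
        // f.safeGet<UInt64>();          // would throw ErrorCodes::BAD_GET ("has Float64, requested UInt64")
    }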
diff --git a/src/DataTypes/DataTypeCustomGeo.cpp b/src/DataTypes/DataTypeCustomGeo.cpp index 0736d837d46..f90788ec403 100644 --- a/src/DataTypes/DataTypeCustomGeo.cpp +++ b/src/DataTypes/DataTypeCustomGeo.cpp @@ -24,6 +24,13 @@ void registerDataTypeDomainGeo(DataTypeFactory & factory) std::make_unique(std::make_unique())); }); + // Custom type for multiple lines stored as Array(LineString) + factory.registerSimpleDataTypeCustom("MultiLineString", [] + { + return std::make_pair(DataTypeFactory::instance().get("Array(LineString)"), + std::make_unique(std::make_unique())); + }); + // Custom type for simple polygon without holes stored as Array(Point) factory.registerSimpleDataTypeCustom("Ring", [] { diff --git a/src/DataTypes/DataTypeCustomGeo.h b/src/DataTypes/DataTypeCustomGeo.h index 0a1c83e4638..6a632f0d05c 100644 --- a/src/DataTypes/DataTypeCustomGeo.h +++ b/src/DataTypes/DataTypeCustomGeo.h @@ -17,6 +17,12 @@ public: DataTypeLineStringName() : DataTypeCustomFixedName("LineString") {} }; +class DataTypeMultiLineStringName : public DataTypeCustomFixedName +{ +public: + DataTypeMultiLineStringName() : DataTypeCustomFixedName("MultiLineString") {} +}; + class DataTypeRingName : public DataTypeCustomFixedName { public: diff --git a/src/DataTypes/DataTypeDynamic.cpp b/src/DataTypes/DataTypeDynamic.cpp index a1b1f8325f0..5f713e9adc9 100644 --- a/src/DataTypes/DataTypeDynamic.cpp +++ b/src/DataTypes/DataTypeDynamic.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -14,6 +15,7 @@ #include #include #include +#include namespace DB { @@ -71,10 +73,10 @@ static DataTypePtr create(const ASTPtr & arguments) auto * literal = argument->arguments->children[1]->as(); - if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.get() == 0 || literal->value.get() > ColumnVariant::MAX_NESTED_COLUMNS) - throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 1 and 255"); + if (!literal || literal->value.getType() != Field::Types::UInt64 || literal->value.safeGet() > ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT) + throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "'max_types' argument for Dynamic type should be a positive integer between 0 and {}", ColumnDynamic::MAX_DYNAMIC_TYPES_LIMIT); - return std::make_shared(literal->value.get()); + return std::make_shared(literal->value.safeGet()); } void registerDataTypeDynamic(DataTypeFactory & factory) @@ -84,30 +86,72 @@ void registerDataTypeDynamic(DataTypeFactory & factory) std::unique_ptr DataTypeDynamic::getDynamicSubcolumnData(std::string_view subcolumn_name, const DB::IDataType::SubstreamData & data, bool throw_if_null) const { - auto [subcolumn_type_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name); + auto [type_subcolumn_name, subcolumn_nested_name] = Nested::splitName(subcolumn_name); /// Check if requested subcolumn is a valid data type. 
- auto subcolumn_type = DataTypeFactory::instance().tryGet(String(subcolumn_type_name)); + auto subcolumn_type = DataTypeFactory::instance().tryGet(String(type_subcolumn_name)); if (!subcolumn_type) { if (throw_if_null) - throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", subcolumn_type_name); + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Dynamic type doesn't have subcolumn '{}'", type_subcolumn_name); return nullptr; } std::unique_ptr res = std::make_unique(subcolumn_type->getDefaultSerialization()); res->type = subcolumn_type; std::optional discriminator; + ColumnPtr null_map_for_variant_from_shared_variant; if (data.column) { /// If column was provided, we should extract subcolumn from Dynamic column. const auto & dynamic_column = assert_cast(*data.column); const auto & variant_info = dynamic_column.getVariantInfo(); + const auto & variant_column = dynamic_column.getVariantColumn(); + const auto & shared_variant = dynamic_column.getSharedVariant(); /// Check if provided Dynamic column has subcolumn of this type. - auto it = variant_info.variant_name_to_discriminator.find(subcolumn_type->getName()); + String subcolumn_type_name = subcolumn_type->getName(); + auto it = variant_info.variant_name_to_discriminator.find(subcolumn_type_name); if (it != variant_info.variant_name_to_discriminator.end()) { discriminator = it->second; - res->column = dynamic_column.getVariantColumn().getVariantPtrByGlobalDiscriminator(*discriminator); + res->column = variant_column.getVariantPtrByGlobalDiscriminator(*discriminator); + } + /// Otherwise if there is data in shared variant try to find requested type there. + else if (!shared_variant.empty()) + { + /// Create null map for resulting subcolumn to make it Nullable. + auto null_map_column = ColumnUInt8::create(); + NullMap & null_map = assert_cast(*null_map_column).getData(); + null_map.reserve(variant_column.size()); + auto subcolumn = subcolumn_type->createColumn(); + auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(dynamic_column.getSharedVariantDiscriminator()); + const auto & local_discriminators = variant_column.getLocalDiscriminators(); + const auto & offsets = variant_column.getOffsets(); + const FormatSettings format_settings; + for (size_t i = 0; i != local_discriminators.size(); ++i) + { + if (local_discriminators[i] == shared_variant_local_discr) + { + auto value = shared_variant.getDataAt(offsets[i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + if (type->getName() == subcolumn_type_name) + { + dynamic_column.getVariantSerialization(subcolumn_type, subcolumn_type_name)->deserializeBinary(*subcolumn, buf, format_settings); + null_map.push_back(0); + } + else + { + null_map.push_back(1); + } + } + else + { + null_map.push_back(1); + } + } + + res->column = std::move(subcolumn); + null_map_for_variant_from_shared_variant = std::move(null_map_column); } } @@ -125,7 +169,7 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa return nullptr; } - res->serialization = std::make_shared(res->serialization, subcolumn_type->getName(), is_null_map_subcolumn); + res->serialization = std::make_shared(res->serialization, subcolumn_type->getName(), String(subcolumn_nested_name), is_null_map_subcolumn); /// Make resulting subcolumn Nullable only if type subcolumn can be inside Nullable or can be LowCardinality(Nullable()). 
bool make_subcolumn_nullable = subcolumn_type->canBeInsideNullable() || subcolumn_type->lowCardinality(); if (!is_null_map_subcolumn && make_subcolumn_nullable) @@ -133,10 +177,10 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa if (data.column) { + /// Check if provided Dynamic column has subcolumn of this type. In this case we should use VariantSubcolumnCreator/VariantNullMapSubcolumnCreator to + /// create full subcolumn from variant according to discriminators. if (discriminator) { - /// Provided Dynamic column has subcolumn of this type, we should use VariantSubcolumnCreator/VariantNullMapSubcolumnCreator to - /// create full subcolumn from variant according to discriminators. const auto & variant_column = assert_cast(*data.column).getVariantColumn(); std::unique_ptr creator; if (is_null_map_subcolumn) @@ -154,6 +198,21 @@ std::unique_ptr DataTypeDynamic::getDynamicSubcolumnDa make_subcolumn_nullable); res->column = creator->create(res->column); } + /// Check if requested type was extracted from shared variant. In this case we should use + /// VariantSubcolumnCreator to create full subcolumn from variant according to created null map. + else if (null_map_for_variant_from_shared_variant) + { + if (is_null_map_subcolumn) + { + res->column = null_map_for_variant_from_shared_variant; + } + else + { + SerializationVariantElement::VariantSubcolumnCreator creator( + null_map_for_variant_from_shared_variant, "", 0, 0, make_subcolumn_nullable, null_map_for_variant_from_shared_variant); + res->column = creator.create(res->column); + } + } /// Provided Dynamic column doesn't have subcolumn of this type, just create column filled with default values. else if (is_null_map_subcolumn) { diff --git a/src/DataTypes/DataTypeEnum.cpp b/src/DataTypes/DataTypeEnum.cpp index 08e0c0d2045..b9a5a1a5a68 100644 --- a/src/DataTypes/DataTypeEnum.cpp +++ b/src/DataTypes/DataTypeEnum.cpp @@ -122,12 +122,12 @@ Field DataTypeEnum::castToName(const Field & value_or_name) const { if (value_or_name.getType() == Field::Types::String) { - this->getValue(value_or_name.get()); /// Check correctness - return value_or_name.get(); + this->getValue(value_or_name.safeGet()); /// Check correctness + return value_or_name.safeGet(); } else if (value_or_name.getType() == Field::Types::Int64) { - Int64 value = value_or_name.get(); + Int64 value = value_or_name.safeGet(); checkOverflow(value); return this->getNameForValue(static_cast(value)).toString(); } @@ -141,12 +141,12 @@ Field DataTypeEnum::castToValue(const Field & value_or_name) const { if (value_or_name.getType() == Field::Types::String) { - return this->getValue(value_or_name.get()); + return this->getValue(value_or_name.safeGet()); } else if (value_or_name.getType() == Field::Types::Int64 || value_or_name.getType() == Field::Types::UInt64) { - Int64 value = value_or_name.get(); + Int64 value = value_or_name.safeGet(); checkOverflow(value); this->getNameForValue(static_cast(value)); /// Check correctness return value; @@ -220,7 +220,7 @@ static void autoAssignNumberForEnum(const ASTPtr & arguments) "Elements of Enum data type must be of form: " "'name' = number or 'name', where name is string literal and number is an integer"); - literal_child_assign_num = value_literal->value.get(); + literal_child_assign_num = value_literal->value.safeGet(); } assign_number_child.emplace_back(child); } @@ -269,8 +269,8 @@ static DataTypePtr createExact(const ASTPtr & arguments) "Elements of Enum data type must be of form: " "'name' = number or 'name', where name is string literal 
and number is an integer"); - const String & field_name = name_literal->value.get(); - const auto value = value_literal->value.get(); + const String & field_name = name_literal->value.safeGet(); + const auto value = value_literal->value.safeGet(); if (value > std::numeric_limits::max() || value < std::numeric_limits::min()) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Value {} for element '{}' exceeds range of {}", @@ -302,7 +302,7 @@ static DataTypePtr create(const ASTPtr & arguments) "Elements of Enum data type must be of form: " "'name' = number or 'name', where name is string literal and number is an integer"); - Int64 value = value_literal->value.get(); + Int64 value = value_literal->value.safeGet(); if (value > std::numeric_limits::max() || value < std::numeric_limits::min()) return createExact(arguments); diff --git a/src/DataTypes/DataTypeFactory.cpp b/src/DataTypes/DataTypeFactory.cpp index 6f7dcd65b83..ca2ebdfbdbb 100644 --- a/src/DataTypes/DataTypeFactory.cpp +++ b/src/DataTypes/DataTypeFactory.cpp @@ -150,6 +150,12 @@ DataTypePtr DataTypeFactory::getCustom(DataTypeCustomDescPtr customization) cons return type; } +DataTypePtr DataTypeFactory::getCustom(const String & base_name, DataTypeCustomDescPtr customization) const +{ + auto type = get(base_name); + type->setCustomization(std::move(customization)); + return type; +} void DataTypeFactory::registerDataType(const String & family_name, Value creator, Case case_sensitiveness) { diff --git a/src/DataTypes/DataTypeFactory.h b/src/DataTypes/DataTypeFactory.h index edba9886d1c..a8324341691 100644 --- a/src/DataTypes/DataTypeFactory.h +++ b/src/DataTypes/DataTypeFactory.h @@ -34,6 +34,7 @@ public: DataTypePtr get(const String & family_name, const ASTPtr & parameters) const; DataTypePtr get(const ASTPtr & ast) const; DataTypePtr getCustom(DataTypeCustomDescPtr customization) const; + DataTypePtr getCustom(const String & base_name, DataTypeCustomDescPtr customization) const; /// Return nullptr in case of error. 
DataTypePtr tryGet(const String & full_name) const; diff --git a/src/DataTypes/DataTypeFixedString.cpp b/src/DataTypes/DataTypeFixedString.cpp index 080ff8826a5..63d5245287f 100644 --- a/src/DataTypes/DataTypeFixedString.cpp +++ b/src/DataTypes/DataTypeFixedString.cpp @@ -51,11 +51,11 @@ static DataTypePtr create(const ASTPtr & arguments) "FixedString data type family must have exactly one argument - size in bytes"); const auto * argument = arguments->children[0]->as(); - if (!argument || argument->value.getType() != Field::Types::UInt64 || argument->value.get() == 0) + if (!argument || argument->value.getType() != Field::Types::UInt64 || argument->value.safeGet() == 0) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "FixedString data type family must have a number (positive integer) as its argument"); - return std::make_shared(argument->value.get()); + return std::make_shared(argument->value.safeGet()); } diff --git a/src/DataTypes/DataTypeObject.cpp b/src/DataTypes/DataTypeObject.cpp index 91b9bfcb2a5..5fd69688194 100644 --- a/src/DataTypes/DataTypeObject.cpp +++ b/src/DataTypes/DataTypeObject.cpp @@ -69,7 +69,7 @@ static DataTypePtr create(const ASTPtr & arguments) throw Exception(ErrorCodes::UNEXPECTED_AST_STRUCTURE, "Object data type family must have a const string as its schema name parameter"); - return std::make_shared(literal->value.get(), is_nullable); + return std::make_shared(literal->value.safeGet(), is_nullable); } void registerDataTypeObject(DataTypeFactory & factory) diff --git a/src/DataTypes/DataTypeTuple.cpp b/src/DataTypes/DataTypeTuple.cpp index e96937d522d..67b4a0a5e31 100644 --- a/src/DataTypes/DataTypeTuple.cpp +++ b/src/DataTypes/DataTypeTuple.cpp @@ -192,17 +192,12 @@ MutableColumnPtr DataTypeTuple::createColumn() const MutableColumnPtr DataTypeTuple::createColumn(const ISerialization & serialization) const { - /// If we read Tuple as Variant subcolumn, it may be wrapped to SerializationVariantElement. - /// Here we don't need it, so we drop this wrapper. - const auto * current_serialization = &serialization; - while (const auto * serialization_variant_element = typeid_cast(current_serialization)) - current_serialization = serialization_variant_element->getNested().get(); - - /// If we read subcolumn of nested Tuple, it may be wrapped to SerializationNamed + /// If we read subcolumn of nested Tuple or this Tuple is a subcolumn, it may be wrapped to SerializationWrapper /// several times to allow to reconstruct the substream path name. /// Here we don't need substream path name, so we drop first several wrapper serializations. - while (const auto * serialization_named = typeid_cast(current_serialization)) - current_serialization = serialization_named->getNested().get(); + const auto * current_serialization = &serialization; + while (const auto * serialization_wrapper = dynamic_cast(current_serialization)) + current_serialization = serialization_wrapper->getNested().get(); const auto * serialization_tuple = typeid_cast(current_serialization); if (!serialization_tuple) diff --git a/src/DataTypes/DataTypeVariant.cpp b/src/DataTypes/DataTypeVariant.cpp index 8a10ca7d06d..cc8d04e94da 100644 --- a/src/DataTypes/DataTypeVariant.cpp +++ b/src/DataTypes/DataTypeVariant.cpp @@ -117,7 +117,7 @@ bool DataTypeVariant::equals(const IDataType & rhs) const /// The same data types with different custom names considered different. /// For example, UInt8 and Bool. 
- if ((variants[i]->hasCustomName() || rhs_variant.variants[i]) && variants[i]->getName() != rhs_variant.variants[i]->getName()) + if ((variants[i]->hasCustomName() || rhs_variant.variants[i]->hasCustomName()) && variants[i]->getName() != rhs_variant.variants[i]->getName()) return false; } diff --git a/src/DataTypes/DataTypesBinaryEncoding.cpp b/src/DataTypes/DataTypesBinaryEncoding.cpp index bd994e313ba..610f246265e 100644 --- a/src/DataTypes/DataTypesBinaryEncoding.cpp +++ b/src/DataTypes/DataTypesBinaryEncoding.cpp @@ -444,7 +444,7 @@ void encodeDataType(const DataTypePtr & type, WriteBuffer & buf) case BinaryTypeIndex::Dynamic: { const auto & dynamic_type = assert_cast(*type); - /// Maximum number of dynamic types is 255, we can write it as 1 byte. + /// Maximum number of dynamic types is 254, we can write it as 1 byte. writeBinary(UInt8(dynamic_type.getMaxDynamicTypes()), buf); break; } diff --git a/src/DataTypes/DataTypesDecimal.cpp b/src/DataTypes/DataTypesDecimal.cpp index a427fd0717a..1d8f7711de1 100644 --- a/src/DataTypes/DataTypesDecimal.cpp +++ b/src/DataTypes/DataTypesDecimal.cpp @@ -80,14 +80,14 @@ static DataTypePtr create(const ASTPtr & arguments) const auto * precision_arg = arguments->children[0]->as(); if (!precision_arg || precision_arg->value.getType() != Field::Types::UInt64) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Decimal argument precision is invalid"); - precision = precision_arg->value.get(); + precision = precision_arg->value.safeGet(); if (arguments->children.size() == 2) { const auto * scale_arg = arguments->children[1]->as(); if (!scale_arg || !isInt64OrUInt64FieldType(scale_arg->value.getType())) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Decimal argument scale is invalid"); - scale = scale_arg->value.get(); + scale = scale_arg->value.safeGet(); } } @@ -107,7 +107,7 @@ static DataTypePtr createExact(const ASTPtr & arguments) "Decimal32 | Decimal64 | Decimal128 | Decimal256 data type family must have a one number as its argument"); UInt64 precision = DecimalUtils::max_precision; - UInt64 scale = scale_arg->value.get(); + UInt64 scale = scale_arg->value.safeGet(); return createDecimal(precision, scale); } diff --git a/src/DataTypes/ObjectUtils.cpp b/src/DataTypes/ObjectUtils.cpp index 356e609e77a..fe8a5ede102 100644 --- a/src/DataTypes/ObjectUtils.cpp +++ b/src/DataTypes/ObjectUtils.cpp @@ -135,7 +135,7 @@ Array createEmptyArrayField(size_t num_dimensions) for (size_t i = 1; i < num_dimensions; ++i) { current_array->push_back(Array()); - current_array = ¤t_array->back().get(); + current_array = ¤t_array->back().safeGet(); } return array; diff --git a/src/DataTypes/Serializations/JSONDataParser.cpp b/src/DataTypes/Serializations/JSONDataParser.cpp index 56641424396..0f74815f5b4 100644 --- a/src/DataTypes/Serializations/JSONDataParser.cpp +++ b/src/DataTypes/Serializations/JSONDataParser.cpp @@ -131,7 +131,7 @@ void JSONDataParser::traverseArrayElement(const Element & element, P auto nested_hash = getHashOfNestedPath(paths[i], values[i]); if (nested_hash) { - size_t array_size = values[i].template get().size(); + size_t array_size = values[i].template safeGet().size(); auto & current_nested_sizes = ctx.nested_sizes_by_path[*nested_hash]; if (current_nested_sizes.size() == ctx.current_size) @@ -154,7 +154,7 @@ void JSONDataParser::traverseArrayElement(const Element & element, P auto nested_hash = getHashOfNestedPath(paths[i], values[i]); if (nested_hash) { - size_t array_size = values[i].template get().size(); + size_t array_size = 
values[i].template safeGet().size(); auto & current_nested_sizes = ctx.nested_sizes_by_path[*nested_hash]; if (current_nested_sizes.empty()) diff --git a/src/DataTypes/Serializations/SerializationAggregateFunction.cpp b/src/DataTypes/Serializations/SerializationAggregateFunction.cpp index 55f7641e058..41b198890e4 100644 --- a/src/DataTypes/Serializations/SerializationAggregateFunction.cpp +++ b/src/DataTypes/Serializations/SerializationAggregateFunction.cpp @@ -16,14 +16,14 @@ namespace DB void SerializationAggregateFunction::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - const AggregateFunctionStateData & state = field.get(); + const AggregateFunctionStateData & state = field.safeGet(); writeBinary(state.data, ostr); } void SerializationAggregateFunction::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const { field = AggregateFunctionStateData(); - AggregateFunctionStateData & s = field.get(); + AggregateFunctionStateData & s = field.safeGet(); readBinary(s.data, istr); s.name = type_name; } diff --git a/src/DataTypes/Serializations/SerializationArray.cpp b/src/DataTypes/Serializations/SerializationArray.cpp index b7d43332085..0a9c4529e23 100644 --- a/src/DataTypes/Serializations/SerializationArray.cpp +++ b/src/DataTypes/Serializations/SerializationArray.cpp @@ -29,7 +29,7 @@ static constexpr size_t MAX_ARRAYS_SIZE = 1ULL << 40; void SerializationArray::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const Array & a = field.get(); + const Array & a = field.safeGet(); writeVarUInt(a.size(), ostr); for (const auto & i : a) { @@ -51,7 +51,7 @@ void SerializationArray::deserializeBinary(Field & field, ReadBuffer & istr, con settings.binary.max_binary_string_size); field = Array(); - Array & arr = field.get(); + Array & arr = field.safeGet(); arr.reserve(size); for (size_t i = 0; i < size; ++i) nested->deserializeBinary(arr.emplace_back(), istr, settings); diff --git a/src/DataTypes/Serializations/SerializationDecimalBase.cpp b/src/DataTypes/Serializations/SerializationDecimalBase.cpp index 49dc042e872..8927f949368 100644 --- a/src/DataTypes/Serializations/SerializationDecimalBase.cpp +++ b/src/DataTypes/Serializations/SerializationDecimalBase.cpp @@ -13,7 +13,7 @@ namespace DB template void SerializationDecimalBase::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - FieldType x = field.get>(); + FieldType x = field.safeGet>(); writeBinaryLittleEndian(x, ostr); } diff --git a/src/DataTypes/Serializations/SerializationDynamic.cpp b/src/DataTypes/Serializations/SerializationDynamic.cpp index 7609ffc91ca..9cd0adcc2ed 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.cpp +++ b/src/DataTypes/Serializations/SerializationDynamic.cpp @@ -27,15 +27,21 @@ namespace ErrorCodes struct SerializeBinaryBulkStateDynamic : public ISerialization::SerializeBinaryBulkState { SerializationDynamic::DynamicStructureSerializationVersion structure_version; + size_t max_dynamic_types; DataTypePtr variant_type; Names variant_names; SerializationPtr variant_serialization; ISerialization::SerializeBinaryBulkStatePtr variant_state; - /// Variants statistics. Map (Variant name) -> (Variant size). - ColumnDynamic::Statistics statistics = { .source = ColumnDynamic::Statistics::Source::READ, .data = {} }; + /// Variants statistics. + ColumnDynamic::Statistics statistics; + /// If true, statistics will be recalculated during serialization. 
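The SerializationArray hunk above keeps the usual length-prefixed binary layout: a varint element count followed by the elements, with an upper bound checked before anything is allocated. A rough standalone sketch of that scheme; the varint helpers and the MAX_ELEMENTS limit are simplified stand-ins, not the real constants.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <vector>

// Varint encoding similar in spirit to writeVarUInt: 7 bits per byte, high bit = "more follows".
void writeVarUInt(uint64_t x, std::ostream & out)
{
    while (x >= 0x80) { out.put(static_cast<char>(x | 0x80)); x >>= 7; }
    out.put(static_cast<char>(x));
}

uint64_t readVarUInt(std::istream & in)
{
    uint64_t x = 0; int shift = 0; char byte;
    do { in.get(byte); x |= static_cast<uint64_t>(static_cast<unsigned char>(byte) & 0x7F) << shift; shift += 7; }
    while (static_cast<unsigned char>(byte) & 0x80);
    return x;
}

// Length-prefixed array of int32, rejecting absurd declared sizes before allocating,
// analogous to the MAX_ARRAYS_SIZE / max_binary_string_size checks in the patch.
constexpr uint64_t MAX_ELEMENTS = 1ULL << 20;

void serializeArray(const std::vector<int32_t> & a, std::ostream & out)
{
    writeVarUInt(a.size(), out);
    for (int32_t v : a) out.write(reinterpret_cast<const char *>(&v), sizeof(v));
}

std::vector<int32_t> deserializeArray(std::istream & in)
{
    uint64_t size = readVarUInt(in);
    if (size > MAX_ELEMENTS) throw std::runtime_error("Too large array size");
    std::vector<int32_t> a(size);
    for (auto & v : a) in.read(reinterpret_cast<char *>(&v), sizeof(v));
    return a;
}

int main()
{
    std::stringstream buf;
    serializeArray({1, 2, 3}, buf);
    std::cout << deserializeArray(buf).size() << '\n';  // 3
}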
+ bool recalculate_statistics = false; - explicit SerializeBinaryBulkStateDynamic(UInt64 structure_version_) : structure_version(structure_version_) {} + explicit SerializeBinaryBulkStateDynamic(UInt64 structure_version_) + : structure_version(structure_version_), statistics(ColumnDynamic::Statistics::Source::READ) + { + } }; struct DeserializeBinaryBulkStateDynamic : public ISerialization::DeserializeBinaryBulkState @@ -106,20 +112,41 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( writeBinaryLittleEndian(structure_version, *stream); auto dynamic_state = std::make_shared(structure_version); + dynamic_state->max_dynamic_types = column_dynamic.getMaxDynamicTypes(); + /// Write max_dynamic_types parameter, because it can differ from the max_dynamic_types + /// that is specified in the Dynamic type (we could decrease it before merge). + writeBinaryLittleEndian(dynamic_state->max_dynamic_types, *stream); + dynamic_state->variant_type = variant_info.variant_type; dynamic_state->variant_names = variant_info.variant_names; const auto & variant_column = column_dynamic.getVariantColumn(); - /// Write internal Variant type name. + /// Write information about variants. + size_t num_variants = dynamic_state->variant_names.size() - 1; /// Don't write shared variant, Dynamic column should always have it. + writeBinaryLittleEndian(num_variants, *stream); if (settings.data_types_binary_encoding) - encodeDataType(dynamic_state->variant_type, *stream); + { + const auto & variants = assert_cast(*dynamic_state->variant_type).getVariants(); + for (const auto & variant: variants) + { + if (variant->getName() != ColumnDynamic::getSharedVariantTypeName()) + encodeDataType(variant, *stream); + } + } else - writeStringBinary(dynamic_state->variant_type->getName(), *stream); + { + for (const auto & name : dynamic_state->variant_names) + { + if (name != ColumnDynamic::getSharedVariantTypeName()) + writeStringBinary(name, *stream); + } + } /// Write statistics in prefix if needed. if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::PREFIX) { const auto & statistics = column_dynamic.getStatistics(); + /// First, write statistics for usual variants. for (size_t i = 0; i != variant_info.variant_names.size(); ++i) { size_t size = 0; @@ -129,13 +156,55 @@ void SerializationDynamic::serializeBinaryBulkStatePrefix( /// - statistics read from the data part during deserialization of Dynamic column (Statistics::Source::READ). /// We can rely only on statistics calculated during the merge, because column with statistics that was read /// during deserialization from some data part could be filtered/limited/transformed/etc and so the statistics can be outdated. - if (!statistics.data.empty() && statistics.source == ColumnDynamic::Statistics::Source::MERGE) - size = statistics.data.at(variant_info.variant_names[i]); + if (statistics && statistics->source == ColumnDynamic::Statistics::Source::MERGE) + size = statistics->variants_statistics.at(variant_info.variant_names[i]); /// Otherwise we can use only variant sizes from current column. else size = variant_column.getVariantByGlobalDiscriminator(i).size(); writeVarUInt(size, *stream); } + + /// Second, write statistics for variants in shared variant. + /// Check if we have statistics calculated during merge of some data parts (Statistics::Source::MERGE). 
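The new serializeBinaryBulkStatePrefix above extends the Dynamic prefix: after the structure version it now stores max_dynamic_types, the number of variants excluding the always-present shared variant, and then the variant names (or binary-encoded types). A simplified sketch of writing such a prefix; the struct, helper functions and fixed-width length encoding are assumptions for illustration, not the actual on-disk format.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Simplified model of the Dynamic column structure that the new prefix describes.
struct DynamicStructure
{
    uint64_t structure_version = 1;
    uint8_t max_dynamic_types = 32;          // can be lower than the type's parameter after merges
    std::vector<std::string> variant_names;  // excluding the always-present shared variant
};

void writeLE(uint64_t x, size_t bytes, std::ostream & out)
{
    for (size_t i = 0; i < bytes; ++i)
        out.put(static_cast<char>((x >> (8 * i)) & 0xFF));
}

void writeString(const std::string & s, std::ostream & out)
{
    writeLE(s.size(), 8, out);
    out.write(s.data(), static_cast<std::streamsize>(s.size()));
}

// Prefix layout sketched from the patch: version, max_dynamic_types, variant count, variant names.
void writeStructurePrefix(const DynamicStructure & s, std::ostream & out)
{
    writeLE(s.structure_version, 8, out);
    writeLE(s.max_dynamic_types, 1, out);    // fits in one byte (at most 254 + the shared variant)
    writeLE(s.variant_names.size(), 8, out);
    for (const auto & name : s.variant_names)
        writeString(name, out);
}

int main()
{
    std::ostringstream out;
    writeStructurePrefix({1, 32, {"Int64", "String"}}, out);
    std::cout << out.str().size() << " bytes written\n";
}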
+ if (statistics && statistics->source == ColumnDynamic::Statistics::Source::MERGE) + { + writeVarUInt(statistics->shared_variants_statistics.size(), *stream); + for (const auto & [variant_name, size] : statistics->shared_variants_statistics) + { + writeStringBinary(variant_name, *stream); + writeVarUInt(size, *stream); + } + } + /// If we don't have statistics for shared variants from merge, calculate it from the column. + else + { + std::unordered_map shared_variants_statistics; + const auto & shared_variant = column_dynamic.getSharedVariant(); + for (size_t i = 0; i != shared_variant.size(); ++i) + { + auto value = shared_variant.getDataAt(i); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + if (auto it = shared_variants_statistics.find(type_name); it != shared_variants_statistics.end()) + ++it->second; + else if (shared_variants_statistics.size() < ColumnDynamic::Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) + shared_variants_statistics.emplace(type_name, 1); + } + + writeVarUInt(shared_variants_statistics.size(), *stream); + for (const auto & [variant_name, size] : shared_variants_statistics) + { + writeStringBinary(variant_name, *stream); + writeVarUInt(size, *stream); + } + } + } + /// Otherwise statistics will be written in the suffix, in this case we will recalculate + /// statistics during serialization to make it more precise. + else + { + dynamic_state->recalculate_statistics = true; } dynamic_state->variant_serialization = dynamic_state->variant_type->getDefaultSerialization(); @@ -182,33 +251,58 @@ ISerialization::DeserializeBinaryBulkStatePtr SerializationDynamic::deserializeD UInt64 structure_version; readBinaryLittleEndian(structure_version, *structure_stream); auto structure_state = std::make_shared(structure_version); - /// Read internal Variant type name. + /// Read max_dynamic_types parameter. + readBinaryLittleEndian(structure_state->max_dynamic_types, *structure_stream); + /// Read information about variants. + DataTypes variants; + size_t num_variants; + readBinaryLittleEndian(num_variants, *structure_stream); + variants.reserve(num_variants + 1); /// +1 for shared variant. if (settings.data_types_binary_encoding) { - structure_state->variant_type = decodeDataType(*structure_stream); + for (size_t i = 0; i != num_variants; ++i) + variants.push_back(decodeDataType(*structure_stream)); } else { String data_type_name; - readStringBinary(data_type_name, *structure_stream); - structure_state->variant_type = DataTypeFactory::instance().get(data_type_name); + for (size_t i = 0; i != num_variants; ++i) + { + readStringBinary(data_type_name, *structure_stream); + variants.push_back(DataTypeFactory::instance().get(data_type_name)); + } } - const auto * variant_type = typeid_cast(structure_state->variant_type.get()); - if (!variant_type) - throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type of Dynamic nested column, expected Variant, got {}", structure_state->variant_type->getName()); + /// Add shared variant, Dynamic column should always have it. + variants.push_back(ColumnDynamic::getSharedVariantDataType()); + auto variant_type = std::make_shared(variants); /// Read statistics. if (settings.dynamic_read_statistics) { - const auto & variants = variant_type->getVariants(); + ColumnDynamic::Statistics statistics(ColumnDynamic::Statistics::Source::READ); + /// First, read statistics for usual variants. 
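Statistics for types stored in the shared variant are gathered by decoding each value's type name and counting occurrences, with a hard cap on how many distinct types are tracked (MAX_SHARED_VARIANT_STATISTICS_SIZE in the patch). A standalone sketch of that bounded counting; the cap value below is illustrative.

#include <cstddef>
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Never track more than a fixed number of distinct types, so the statistics stay bounded.
constexpr size_t MAX_TRACKED_TYPES = 256;  // illustrative cap, not the real constant's value

std::unordered_map<std::string, size_t> countTypes(const std::vector<std::string> & type_per_row)
{
    std::unordered_map<std::string, size_t> stats;
    for (const auto & type_name : type_per_row)
    {
        if (auto it = stats.find(type_name); it != stats.end())
            ++it->second;                      // known type: just bump the counter
        else if (stats.size() < MAX_TRACKED_TYPES)
            stats.emplace(type_name, 1);       // new type: track it only while below the cap
        // otherwise: silently dropped, the map never grows past the cap
    }
    return stats;
}

int main()
{
    auto stats = countTypes({"UInt64", "String", "UInt64", "Array(Int64)"});
    for (const auto & [name, count] : stats)
        std::cout << name << ": " << count << '\n';
}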
size_t variant_size; - for (const auto & variant : variants) + for (const auto & variant : variant_type->getVariants()) { readVarUInt(variant_size, *structure_stream); - structure_state->statistics.data[variant->getName()] = variant_size; + statistics.variants_statistics[variant->getName()] = variant_size; } + + /// Second, read statistics for shared variants. + size_t statistics_size; + readVarUInt(statistics_size, *structure_stream); + String variant_name; + for (size_t i = 0; i != statistics_size; ++i) + { + readStringBinary(variant_name, *structure_stream); + readVarUInt(variant_size, *structure_stream); + statistics.shared_variants_statistics[variant_name] = variant_size; + } + + structure_state->statistics = std::make_shared(std::move(statistics)); } + structure_state->variant_type = std::move(variant_type); state = structure_state; addToSubstreamsDeserializeStatesCache(cache, settings.path, state); } @@ -231,8 +325,16 @@ void SerializationDynamic::serializeBinaryBulkStateSuffix( /// Write statistics in suffix if needed. if (settings.dynamic_write_statistics == SerializeBinaryBulkSettings::DynamicStatisticsMode::SUFFIX) { + /// First, write statistics for usual variants. for (const auto & variant_name : dynamic_state->variant_names) - writeVarUInt(dynamic_state->statistics.data[variant_name], *stream); + writeVarUInt(dynamic_state->statistics.variants_statistics[variant_name], *stream); + /// Second, write statistics for shared variants. + writeVarUInt(dynamic_state->statistics.shared_variants_statistics.size(), *stream); + for (const auto & [variant_name, size] : dynamic_state->statistics.shared_variants_statistics) + { + writeStringBinary(variant_name, *stream); + writeVarUInt(size, *stream); + } } settings.path.push_back(Substream::DynamicData); @@ -255,9 +357,42 @@ void SerializationDynamic::serializeBinaryBulkWithMultipleStreams( if (!variant_info.variant_type->equals(*dynamic_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", dynamic_state->variant_type->getName(), variant_info.variant_type->getName()); + if (column_dynamic.getMaxDynamicTypes() != dynamic_state->max_dynamic_types) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of max_dynamic_types parameter of Dynamic. Expected: {}, Got: {}", dynamic_state->max_dynamic_types, column_dynamic.getMaxDynamicTypes()); + settings.path.push_back(Substream::DynamicData); - assert_cast(*dynamic_state->variant_serialization) - .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.data); + if (dynamic_state->recalculate_statistics) + { + assert_cast(*dynamic_state->variant_serialization) + .serializeBinaryBulkWithMultipleStreamsAndUpdateVariantStatistics(*variant_column, offset, limit, settings, dynamic_state->variant_state, dynamic_state->statistics.variants_statistics); + /// Calculate statistics for shared variants. + const auto & shared_variant = column_dynamic.getSharedVariant(); + if (!shared_variant.empty()) + { + const auto & local_discriminators = variant_column->getLocalDiscriminators(); + const auto & offsets = variant_column->getOffsets(); + const auto shared_variant_discr = variant_column->localDiscriminatorByGlobal(column_dynamic.getSharedVariantDiscriminator()); + size_t end = limit == 0 || offset + limit > local_discriminators.size() ? 
local_discriminators.size() : offset + limit; + for (size_t i = offset; i != end; ++i) + { + if (local_discriminators[i] == shared_variant_discr) + { + auto value = shared_variant.getDataAt(offsets[i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + if (auto it = dynamic_state->statistics.shared_variants_statistics.find(type_name); it != dynamic_state->statistics.shared_variants_statistics.end()) + ++it->second; + else if (dynamic_state->statistics.shared_variants_statistics.size() < ColumnDynamic::Statistics::MAX_SHARED_VARIANT_STATISTICS_SIZE) + dynamic_state->statistics.shared_variants_statistics.emplace(type_name, 1); + } + } + } + } + else + { + assert_cast(*dynamic_state->variant_serialization).serializeBinaryBulkWithMultipleStreams(*variant_column, offset, limit, settings, dynamic_state->variant_state); + } settings.path.pop_back(); } @@ -272,13 +407,17 @@ void SerializationDynamic::deserializeBinaryBulkWithMultipleStreams( return; auto mutable_column = column->assumeMutable(); + auto & column_dynamic = assert_cast(*mutable_column); auto * dynamic_state = checkAndGetState(state); auto * structure_state = checkAndGetState(dynamic_state->structure_state); if (mutable_column->empty()) - mutable_column = ColumnDynamic::create(structure_state->variant_type->createColumn(), structure_state->variant_type, max_dynamic_types, structure_state->statistics); + { + column_dynamic.setMaxDynamicPaths(structure_state->max_dynamic_types); + column_dynamic.setVariantType(structure_state->variant_type); + column_dynamic.setStatistics(structure_state->statistics); + } - auto & column_dynamic = assert_cast(*mutable_column); const auto & variant_info = column_dynamic.getVariantInfo(); if (!variant_info.variant_type->equals(*structure_state->variant_type)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Mismatch of internal columns of Dynamic. Expected: {}, Got: {}", structure_state->variant_type->getName(), variant_info.variant_type->getName()); @@ -329,24 +468,42 @@ void SerializationDynamic::serializeBinary(const IColumn & column, size_t row_nu encodeDataType(std::make_shared(), ostr); return; } + /// Check if this value is in shared variant. In this case it's already + /// in desired binary format. 
+ else if (global_discr == dynamic_column.getSharedVariantDiscriminator()) + { + auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(row_num)); + ostr.write(value.data, value.size); + return; + } const auto & variant_type = assert_cast(*variant_info.variant_type).getVariant(global_discr); + const auto & variant_type_name = variant_info.variant_names[global_discr]; encodeDataType(variant_type, ostr); - variant_type->getDefaultSerialization()->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings); + dynamic_column.getVariantSerialization(variant_type, variant_type_name)->serializeBinary(variant_column.getVariantByGlobalDiscriminator(global_discr), variant_column.offsetAt(row_num), ostr, settings); } -template -static void deserializeVariant( +template +static ReturnType deserializeVariant( ColumnVariant & variant_column, - const DataTypePtr & variant_type, + const SerializationPtr & variant_serialization, ColumnVariant::Discriminator global_discr, ReadBuffer & istr, DeserializeFunc deserialize) { auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discr); - deserialize(*variant_type->getDefaultSerialization(), variant, istr); + if constexpr (std::is_same_v) + { + if (!deserialize(*variant_serialization, variant, istr)) + return ReturnType(false); + } + else + { + deserialize(*variant_serialization, variant, istr); + } variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(global_discr)); variant_column.getOffsets().push_back(variant.size() - 1); + return ReturnType(true); } void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -360,11 +517,12 @@ void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr } auto variant_type_name = variant_type->getName(); + const auto & variant_serialization = dynamic_column.getVariantSerialization(variant_type, variant_type_name); const auto & variant_info = dynamic_column.getVariantInfo(); auto it = variant_info.variant_name_to_discriminator.find(variant_type_name); if (it != variant_info.variant_name_to_discriminator.end()) { - deserializeVariant(dynamic_column.getVariantColumn(), variant_type, it->second, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); + deserializeVariant(dynamic_column.getVariantColumn(), variant_serialization, it->second, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); return; } @@ -372,25 +530,15 @@ void SerializationDynamic::deserializeBinary(IColumn & column, ReadBuffer & istr if (dynamic_column.addNewVariant(variant_type)) { auto discr = variant_info.variant_name_to_discriminator.at(variant_type_name); - deserializeVariant(dynamic_column.getVariantColumn(), variant_type, discr, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); + deserializeVariant(dynamic_column.getVariantColumn(), variant_serialization, discr, istr, [&settings](const ISerialization & serialization, IColumn & variant, ReadBuffer & buf){ serialization.deserializeBinary(variant, buf, settings); }); return; } /// We reached maximum number of variants and couldn't add new variant. 
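deserializeVariant above becomes a template whose ReturnType is either void (throwing path) or bool (try path), selected with if constexpr so both APIs share one body. A self-contained sketch of that dispatch technique; parseAndStore and its parser are invented examples, not ClickHouse functions.

#include <cctype>
#include <iostream>
#include <stdexcept>
#include <string>
#include <type_traits>

// One helper body serving both the throwing and the "try" API, selected by ReturnType.
template <typename ReturnType, typename ParseFunc>
ReturnType parseAndStore(std::string & out, const std::string & input, ParseFunc parse)
{
    if constexpr (std::is_same_v<ReturnType, bool>)
    {
        if (!parse(input, out))
            return false;                                     // soft failure: caller tries the next variant
    }
    else
    {
        if (!parse(input, out))
            throw std::runtime_error("Cannot parse value");   // hard failure
    }
    // Common tail executed on success in both instantiations.
    out += " [stored]";
    return ReturnType(true);  // 'true' for bool, a harmless cast-to-void otherwise
}

int main()
{
    auto parse_int_like = [](const std::string & in, std::string & out)
    {
        if (in.empty() || !std::isdigit(static_cast<unsigned char>(in[0])))
            return false;
        out = in;
        return true;
    };

    std::string slot;
    std::cout << parseAndStore<bool>(slot, "abc", parse_int_like) << '\n';  // 0: soft failure, no throw
    parseAndStore<void>(slot, "123", parse_int_like);                       // would throw on bad input
    std::cout << slot << '\n';                                              // "123 [stored]"
}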
- /// This case should be really rare in real use cases. - /// We should always be able to add String variant and insert value as String. - dynamic_column.addStringVariant(); + /// In this case we insert this value into shared variant in binary form. auto tmp_variant_column = variant_type->createColumn(); - variant_type->getDefaultSerialization()->deserializeBinary(*tmp_variant_column, istr, settings); - auto string_column = castColumn(ColumnWithTypeAndName(tmp_variant_column->getPtr(), variant_type, ""), std::make_shared()); - auto & variant_column = dynamic_column.getVariantColumn(); - variant_column.insertIntoVariantFrom(variant_info.variant_name_to_discriminator.at("String"), *string_column, 0); -} - -void SerializationDynamic::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const -{ - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextCSV(dynamic_column.getVariantColumn(), row_num, ostr, settings); + variant_serialization->deserializeBinary(*tmp_variant_column, istr, settings); + dynamic_column.insertValueIntoSharedVariant(*tmp_variant_column, variant_type, variant_type_name, 0); } template @@ -406,6 +554,7 @@ static void deserializeTextImpl( auto & dynamic_column = assert_cast(column); auto & variant_column = dynamic_column.getVariantColumn(); const auto & variant_info = dynamic_column.getVariantInfo(); + const auto & variant_types = assert_cast(*variant_info.variant_type).getVariants(); String field = read_field(istr); auto field_buf = std::make_unique(field); JSONInferenceInfo json_info; @@ -413,27 +562,81 @@ static void deserializeTextImpl( if (escaping_rule == FormatSettings::EscapingRule::JSON) transformFinalInferredJSONTypeIfNeeded(variant_type, settings, &json_info); - if (checkIfTypeIsComplete(variant_type) && dynamic_column.addNewVariant(variant_type)) + /// If inferred type is not complete, we cannot add it as a new variant. + /// Let's try to deserialize this field into existing variants. + /// If failed, insert this value as String. + if (!checkIfTypeIsComplete(variant_type)) + { + size_t shared_variant_discr = dynamic_column.getSharedVariantDiscriminator(); + for (size_t i = 0; i != variant_types.size(); ++i) + { + field_buf = std::make_unique(field); + if (i != shared_variant_discr + && deserializeVariant( + variant_column, + dynamic_column.getVariantSerialization(variant_types[i], variant_info.variant_names[i]), + i, + *field_buf, + try_deserialize_variant)) + return; + } + + variant_type = std::make_shared(); + /// To be able to deserialize field as String with Quoted escaping rule, it should be quoted. + if (escaping_rule == FormatSettings::EscapingRule::Quoted && (field.size() < 2 || field.front() != '\'' || field.back() != '\'')) + field = "'" + field + "'"; + } + else if (dynamic_column.addNewVariant(variant_type, variant_type->getName())) { auto discr = variant_info.variant_name_to_discriminator.at(variant_type->getName()); - deserializeVariant(dynamic_column.getVariantColumn(), variant_type, discr, *field_buf, deserialize_variant); + deserializeVariant(dynamic_column.getVariantColumn(), dynamic_column.getVariantSerialization(variant_type), discr, *field_buf, deserialize_variant); return; } - /// We couldn't infer type or add new variant. Try to insert field into current variants. + /// We couldn't infer type or add new variant. Insert it into shared variant. 
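When the type inferred from text is incomplete, the new code tries to deserialize the field into each existing variant and only then falls back to String, re-quoting the raw text when the Quoted escaping rule is in effect. A simplified standalone sketch of that try-then-fallback flow; VariantSlot and the parsers are illustrative, not the real serializations.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct VariantSlot
{
    std::string name;
    std::function<bool(const std::string &)> try_parse;
};

std::string insertField(std::string field, const std::vector<VariantSlot> & variants, bool quoted_escaping)
{
    // First try to place the textual field into one of the existing typed slots.
    for (const auto & v : variants)
        if (v.try_parse(field))
            return "stored as " + v.name;

    // Fallback: treat the value as String. Under the Quoted escaping rule the text must be
    // wrapped in single quotes before it can be re-parsed as a quoted string literal.
    if (quoted_escaping && (field.size() < 2 || field.front() != '\'' || field.back() != '\''))
        field = "'" + field + "'";
    return "stored as String: " + field;
}

int main()
{
    std::vector<VariantSlot> variants = {
        {"Int64", [](const std::string & s) { return !s.empty() && s.find_first_not_of("0123456789-") == std::string::npos; }},
    };
    std::cout << insertField("42", variants, true) << '\n';     // stored as Int64
    std::cout << insertField("hello", variants, true) << '\n';  // stored as String: 'hello'
}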
+ auto tmp_variant_column = variant_type->createColumn(); field_buf = std::make_unique(field); - if (try_deserialize_variant(*variant_info.variant_type->getDefaultSerialization(), variant_column, *field_buf)) - return; + auto variant_type_name = variant_type->getName(); + deserialize_variant(*dynamic_column.getVariantSerialization(variant_type, variant_type_name), *tmp_variant_column, *field_buf); + dynamic_column.insertValueIntoSharedVariant(*tmp_variant_column, variant_type, variant_type_name, 0); +} - /// We couldn't insert field into any existing variant, add String variant and read value as String. - dynamic_column.addStringVariant(); +template +static void serializeTextImpl( + const IColumn & column, + size_t row_num, + WriteBuffer & ostr, + const FormatSettings & settings, + NestedSerialize nested_serialize) +{ + const auto & dynamic_column = assert_cast(column); + const auto & variant_column = dynamic_column.getVariantColumn(); + /// Check if this row has value in shared variant. In this case we should first deserialize it from binary format. + if (variant_column.globalDiscriminatorAt(row_num) == dynamic_column.getSharedVariantDiscriminator()) + { + auto value = dynamic_column.getSharedVariant().getDataAt(variant_column.offsetAt(row_num)); + ReadBufferFromMemory buf(value.data, value.size); + auto variant_type = decodeDataType(buf); + auto tmp_variant_column = variant_type->createColumn(); + auto variant_serialization = dynamic_column.getVariantSerialization(variant_type); + variant_serialization->deserializeBinary(*tmp_variant_column, buf, settings); + nested_serialize(*variant_serialization, *tmp_variant_column, 0, ostr); + } + /// Otherwise just use serialization for Variant. + else + { + nested_serialize(*dynamic_column.getVariantInfo().variant_type->getDefaultSerialization(), variant_column, row_num, ostr); + } +} - if (escaping_rule == FormatSettings::EscapingRule::Quoted && (field.size() < 2 || field.front() != '\'' || field.back() != '\'')) - field = "'" + field + "'"; +void SerializationDynamic::serializeTextCSV(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const +{ + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextCSV(col, row, buf, settings); + }; - field_buf = std::make_unique(field); - auto string_discr = variant_info.variant_name_to_discriminator.at("String"); - deserializeVariant(dynamic_column.getVariantColumn(), std::make_shared(), string_discr, *field_buf, deserialize_variant); + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextCSV(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -466,8 +669,12 @@ bool SerializationDynamic::tryDeserializeTextCSV(DB::IColumn & column, DB::ReadB void SerializationDynamic::serializeTextEscaped(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextEscaped(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextEscaped(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void 
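The per-format serializeText* methods above collapse into one serializeTextImpl that takes a small lambda doing the actual formatting. A minimal sketch of that lambda-dispatch pattern; the CSV/raw formatting shown is simplified and not ClickHouse's real escaping.

#include <iostream>
#include <ostream>
#include <string>

// Shared driver: do the common preparation once (in the patch: unpacking a shared-variant row),
// then delegate the actual formatting to a caller-supplied lambda, like nested_serialize.
template <typename NestedSerialize>
void serializeTextImpl(const std::string & value, std::ostream & out, NestedSerialize nested_serialize)
{
    // ...common preparation would go here...
    nested_serialize(value, out);
}

void serializeTextCSV(const std::string & value, std::ostream & out)
{
    serializeTextImpl(value, out, [](const std::string & v, std::ostream & o) { o << '"' << v << '"'; });
}

void serializeTextRaw(const std::string & value, std::ostream & out)
{
    serializeTextImpl(value, out, [](const std::string & v, std::ostream & o) { o << v; });
}

int main()
{
    serializeTextCSV("a,b", std::cout); std::cout << '\n';  // "a,b"
    serializeTextRaw("a,b", std::cout); std::cout << '\n';  // a,b
}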
SerializationDynamic::deserializeTextEscaped(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -500,8 +707,12 @@ bool SerializationDynamic::tryDeserializeTextEscaped(DB::IColumn & column, DB::R void SerializationDynamic::serializeTextQuoted(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextQuoted(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextQuoted(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextQuoted(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -534,8 +745,12 @@ bool SerializationDynamic::tryDeserializeTextQuoted(DB::IColumn & column, DB::Re void SerializationDynamic::serializeTextJSON(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextJSON(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextJSON(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextJSON(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -568,8 +783,12 @@ bool SerializationDynamic::tryDeserializeTextJSON(DB::IColumn & column, DB::Read void SerializationDynamic::serializeTextRaw(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextRaw(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextRaw(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeTextRaw(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -602,8 +821,12 @@ bool SerializationDynamic::tryDeserializeTextRaw(DB::IColumn & column, DB::ReadB void SerializationDynamic::serializeText(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeText(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeText(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } void SerializationDynamic::deserializeWholeText(IColumn & column, ReadBuffer & istr, const FormatSettings & settings) const @@ -636,8 +859,12 @@ bool 
SerializationDynamic::tryDeserializeWholeText(DB::IColumn & column, DB::Rea void SerializationDynamic::serializeTextXML(const IColumn & column, size_t row_num, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & dynamic_column = assert_cast(column); - dynamic_column.getVariantInfo().variant_type->getDefaultSerialization()->serializeTextXML(dynamic_column.getVariantColumn(), row_num, ostr, settings); + auto nested_serialize = [&settings](const ISerialization & serialization, const IColumn & col, size_t row, WriteBuffer & buf) + { + serialization.serializeTextXML(col, row, buf, settings); + }; + + serializeTextImpl(column, row_num, ostr, settings, nested_serialize); } } diff --git a/src/DataTypes/Serializations/SerializationDynamic.h b/src/DataTypes/Serializations/SerializationDynamic.h index 001a3cf87ce..3dbf311fb6c 100644 --- a/src/DataTypes/Serializations/SerializationDynamic.h +++ b/src/DataTypes/Serializations/SerializationDynamic.h @@ -105,9 +105,13 @@ private: { DynamicStructureSerializationVersion structure_version; DataTypePtr variant_type; - ColumnDynamic::Statistics statistics = {.source = ColumnDynamic::Statistics::Source::READ, .data = {}}; + size_t max_dynamic_types; + ColumnDynamic::StatisticsPtr statistics; - explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) : structure_version(structure_version_) {} + explicit DeserializeBinaryBulkStateDynamicStructure(UInt64 structure_version_) + : structure_version(structure_version_) + { + } }; size_t max_dynamic_types; diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.cpp b/src/DataTypes/Serializations/SerializationDynamicElement.cpp index 211f0ac9377..cffca14bca5 100644 --- a/src/DataTypes/Serializations/SerializationDynamicElement.cpp +++ b/src/DataTypes/Serializations/SerializationDynamicElement.cpp @@ -4,7 +4,10 @@ #include #include #include +#include #include +#include +#include #include namespace DB @@ -21,6 +24,8 @@ struct DeserializeBinaryBulkStateDynamicElement : public ISerialization::Deseria ISerialization::DeserializeBinaryBulkStatePtr structure_state; SerializationPtr variant_serialization; ISerialization::DeserializeBinaryBulkStatePtr variant_element_state; + bool read_from_shared_variant; + ColumnPtr shared_variant; }; void SerializationDynamicElement::enumerateStreams( @@ -73,9 +78,10 @@ void SerializationDynamicElement::deserializeBinaryBulkStatePrefix( auto dynamic_element_state = std::make_shared(); dynamic_element_state->structure_state = std::move(structure_state); - const auto & variant_type = checkAndGetState(dynamic_element_state->structure_state)->variant_type; + const auto & variant_type = assert_cast( + *checkAndGetState(dynamic_element_state->structure_state)->variant_type); /// Check if we actually have required element in the Variant. 
- if (auto global_discr = assert_cast(*variant_type).tryGetVariantDiscriminator(dynamic_element_name)) + if (auto global_discr = variant_type.tryGetVariantDiscriminator(dynamic_element_name)) { settings.path.push_back(Substream::DynamicData); if (is_null_map_subcolumn) @@ -83,6 +89,21 @@ void SerializationDynamicElement::deserializeBinaryBulkStatePrefix( else dynamic_element_state->variant_serialization = std::make_shared(nested_serialization, dynamic_element_name, *global_discr); dynamic_element_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_element_state->variant_element_state, cache); + dynamic_element_state->read_from_shared_variant = false; + settings.path.pop_back(); + } + /// If we don't have this element in the Variant, we will read shared variant and try to find it there. + else + { + auto shared_variant_global_discr = variant_type.tryGetVariantDiscriminator(ColumnDynamic::getSharedVariantTypeName()); + chassert(shared_variant_global_discr.has_value()); + settings.path.push_back(Substream::DynamicData); + dynamic_element_state->variant_serialization = std::make_shared( + ColumnDynamic::getSharedVariantDataType()->getDefaultSerialization(), + ColumnDynamic::getSharedVariantTypeName(), + *shared_variant_global_discr); + dynamic_element_state->variant_serialization->deserializeBinaryBulkStatePrefix(settings, dynamic_element_state->variant_element_state, cache); + dynamic_element_state->read_from_shared_variant = true; settings.path.pop_back(); } @@ -115,23 +136,103 @@ void SerializationDynamicElement::deserializeBinaryBulkWithMultipleStreams( auto * dynamic_element_state = checkAndGetState(state); - if (dynamic_element_state->variant_serialization) + /// Check if this subcolumn should not be read from shared variant. + /// In this case just read data from the corresponding variant. + if (!dynamic_element_state->read_from_shared_variant) { settings.path.push_back(Substream::DynamicData); - dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams(result_column, limit, settings, dynamic_element_state->variant_element_state, cache); + dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams( + result_column, limit, settings, dynamic_element_state->variant_element_state, cache); settings.path.pop_back(); } - else if (is_null_map_subcolumn) - { - auto mutable_column = result_column->assumeMutable(); - auto & data = assert_cast(*mutable_column).getData(); - data.resize_fill(data.size() + limit, 1); - } + /// Otherwise, read the shared variant column and extract requested type from it. else { - auto mutable_column = result_column->assumeMutable(); - mutable_column->insertManyDefaults(limit); - result_column = std::move(mutable_column); + settings.path.push_back(Substream::DynamicData); + /// Initialize shared_variant column if needed. + if (result_column->empty()) + dynamic_element_state->shared_variant = makeNullable(ColumnDynamic::getSharedVariantDataType()->createColumn()); + size_t prev_size = result_column->size(); + dynamic_element_state->variant_serialization->deserializeBinaryBulkWithMultipleStreams( + dynamic_element_state->shared_variant, limit, settings, dynamic_element_state->variant_element_state, cache); + settings.path.pop_back(); + + /// If we need to read a subcolumn from variant column, create an empty variant column, fill it and extract subcolumn. 
+ auto variant_type = DataTypeFactory::instance().get(dynamic_element_name); + auto result_type = makeNullableOrLowCardinalityNullableSafe(variant_type); + MutableColumnPtr variant_column = nested_subcolumn.empty() || is_null_map_subcolumn ? result_column->assumeMutable() : result_type->createColumn(); + variant_column->reserve(variant_column->size() + limit); + MutableColumnPtr non_nullable_variant_column = variant_column->assumeMutable(); + NullMap * null_map = nullptr; + bool is_low_cardinality_nullable = isColumnLowCardinalityNullable(*variant_column); + /// Resulting subolumn can be Nullable, but value is serialized in shared variant as non-Nullable. + /// Extract non-nullable column and remember the null map to fill it during deserialization. + if (isColumnNullable(*variant_column)) + { + auto & nullable_variant_column = assert_cast(*variant_column); + non_nullable_variant_column = nullable_variant_column.getNestedColumnPtr()->assumeMutable(); + null_map = &nullable_variant_column.getNullMapData(); + } + else if (is_null_map_subcolumn) + { + null_map = &assert_cast(*variant_column).getData(); + } + + auto variant_serialization = variant_type->getDefaultSerialization(); + + const auto & nullable_shared_variant = assert_cast(*dynamic_element_state->shared_variant); + const auto & shared_null_map = nullable_shared_variant.getNullMapData(); + const auto & shared_variant = assert_cast(nullable_shared_variant.getNestedColumn()); + const FormatSettings format_settings; + for (size_t i = prev_size; i != shared_variant.size(); ++i) + { + if (!shared_null_map[i]) + { + auto value = shared_variant.getDataAt(i); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + if (type->getName() == dynamic_element_name) + { + /// When requested type is LowCardinality the subcolumn type name will be LowCardinality(Nullable). + /// Value in shared variant is serialized as LowCardinality and we cannot simply deserialize it + /// inside LowCardinality(Nullable) column (it will try to deserialize null bit). In this case we + /// have to create temporary LowCardinality column, deserialize value into it and insert it into + /// resulting LowCardinality(Nullable) (insertion from LowCardinality column to LowCardinality(Nullable) + /// column is allowed). + if (is_low_cardinality_nullable) + { + auto tmp_column = variant_type->createColumn(); + variant_serialization->deserializeBinary(*tmp_column, buf, format_settings); + non_nullable_variant_column->insertFrom(*tmp_column, 0); + } + else if (is_null_map_subcolumn) + { + null_map->push_back(0); + } + else + { + variant_serialization->deserializeBinary(*non_nullable_variant_column, buf, format_settings); + if (null_map) + null_map->push_back(0); + } + } + else + { + variant_column->insertDefault(); + } + } + else + { + variant_column->insertDefault(); + } + } + + /// Extract nested subcolumn if needed. 
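To read a typed subcolumn out of the shared variant, the code above decodes each stored value's type, deserializes it when it matches the requested type, and otherwise inserts a default value while marking the row NULL. A standalone sketch over a simplified row representation; SharedRow and NullableColumn are invented stand-ins for the real columns.

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for a shared-variant row: the stored type name plus an already-decoded value.
struct SharedRow
{
    std::string type_name;
    int64_t value = 0;  // pretend every payload decodes to an integer, for brevity
};

// Result of reading subcolumn `requested_type` out of the shared variant:
// matching rows produce a value, everything else produces NULL (like insertDefault + null map).
struct NullableColumn
{
    std::vector<int64_t> data;
    std::vector<uint8_t> null_map;  // 1 = NULL, matching ClickHouse's convention
};

NullableColumn extractSubcolumn(const std::vector<SharedRow> & shared, const std::string & requested_type)
{
    NullableColumn result;
    for (const auto & row : shared)
    {
        if (row.type_name == requested_type)
        {
            result.data.push_back(row.value);   // real code deserializes the binary payload here
            result.null_map.push_back(0);
        }
        else
        {
            result.data.push_back(0);           // default value
            result.null_map.push_back(1);       // and mark the row as NULL
        }
    }
    return result;
}

int main()
{
    auto col = extractSubcolumn({{"Int64", 7}, {"String", 0}, {"Int64", 9}}, "Int64");
    for (size_t i = 0; i < col.data.size(); ++i)
        std::cout << (col.null_map[i] ? "NULL" : std::to_string(col.data[i])) << '\n';
}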
+ if (!nested_subcolumn.empty() && !is_null_map_subcolumn) + { + auto subcolumn = result_type->getSubcolumn(nested_subcolumn, variant_column->getPtr()); + result_column->assumeMutable()->insertRangeFrom(*subcolumn, 0, subcolumn->size()); + } } } diff --git a/src/DataTypes/Serializations/SerializationDynamicElement.h b/src/DataTypes/Serializations/SerializationDynamicElement.h index 127d14a55e0..c674cf479ae 100644 --- a/src/DataTypes/Serializations/SerializationDynamicElement.h +++ b/src/DataTypes/Serializations/SerializationDynamicElement.h @@ -13,11 +13,15 @@ private: /// To be able to deserialize Dynamic element as a subcolumn /// we need its type name and global discriminator. String dynamic_element_name; + /// Nested subcolumn of a type dynamic type. For example, for `Tuple(a UInt32)`.a + /// subcolumn dynamic_element_name = 'Tuple(a UInt32)' and nested_subcolumn = 'a'. + /// Needed to extract nested subcolumn from values in shared variant. + String nested_subcolumn; bool is_null_map_subcolumn; public: - SerializationDynamicElement(const SerializationPtr & nested_, const String & dynamic_element_name_, bool is_null_map_subcolumn_ = false) - : SerializationWrapper(nested_), dynamic_element_name(dynamic_element_name_), is_null_map_subcolumn(is_null_map_subcolumn_) + SerializationDynamicElement(const SerializationPtr & nested_, const String & dynamic_element_name_, const String & nested_subcolumn_, bool is_null_map_subcolumn_ = false) + : SerializationWrapper(nested_), dynamic_element_name(dynamic_element_name_), nested_subcolumn(nested_subcolumn_), is_null_map_subcolumn(is_null_map_subcolumn_) { } diff --git a/src/DataTypes/Serializations/SerializationFixedString.cpp b/src/DataTypes/Serializations/SerializationFixedString.cpp index f919dc16d33..688c71792fa 100644 --- a/src/DataTypes/Serializations/SerializationFixedString.cpp +++ b/src/DataTypes/Serializations/SerializationFixedString.cpp @@ -28,7 +28,7 @@ static constexpr size_t MAX_STRINGS_SIZE = 1ULL << 30; void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - const String & s = field.get(); + const String & s = field.safeGet(); ostr.write(s.data(), std::min(s.size(), n)); if (s.size() < n) for (size_t i = s.size(); i < n; ++i) @@ -39,7 +39,7 @@ void SerializationFixedString::serializeBinary(const Field & field, WriteBuffer void SerializationFixedString::deserializeBinary(Field & field, ReadBuffer & istr, const FormatSettings &) const { field = String(); - String & s = field.get(); + String & s = field.safeGet(); s.resize(n); istr.readStrict(s.data(), n); } diff --git a/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp b/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp index dfcd24aff58..c1beceb4533 100644 --- a/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp +++ b/src/DataTypes/Serializations/SerializationIPv4andIPv6.cpp @@ -125,7 +125,7 @@ bool SerializationIP::tryDeserializeTextCSV(DB::IColumn & column, DB::ReadB template void SerializationIP::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - IPv x = field.get(); + IPv x = field.safeGet(); if constexpr (std::is_same_v) writeBinary(x, ostr); else diff --git a/src/DataTypes/Serializations/SerializationMap.cpp b/src/DataTypes/Serializations/SerializationMap.cpp index 0bef3c7d79d..c722b3ac7a1 100644 --- a/src/DataTypes/Serializations/SerializationMap.cpp +++ b/src/DataTypes/Serializations/SerializationMap.cpp @@ -40,7 +40,7 @@ static IColumn & 
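The SerializationFixedString hunk above keeps the FixedString(N) binary contract: exactly N bytes per value, shorter strings padded with zero bytes, longer ones truncated. A small standalone sketch of that behaviour.

#include <algorithm>
#include <iostream>
#include <sstream>
#include <string>

// FixedString(N)-style binary form: always exactly N bytes, shorter values padded with '\0'.
void serializeFixed(const std::string & s, size_t n, std::ostream & out)
{
    out.write(s.data(), static_cast<std::streamsize>(std::min(s.size(), n)));
    for (size_t i = s.size(); i < n; ++i)
        out.put('\0');
}

std::string deserializeFixed(size_t n, std::istream & in)
{
    std::string s(n, '\0');
    in.read(s.data(), static_cast<std::streamsize>(n));  // always read exactly n bytes
    return s;
}

int main()
{
    std::stringstream buf;
    serializeFixed("ab", 4, buf);                           // writes 'a', 'b', '\0', '\0'
    std::cout << deserializeFixed(4, buf).size() << '\n';   // 4
}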
extractNestedColumn(IColumn & column) void SerializationMap::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & map = field.get(); + const auto & map = field.safeGet(); writeVarUInt(map.size(), ostr); for (const auto & elem : map) { @@ -63,7 +63,7 @@ void SerializationMap::deserializeBinary(Field & field, ReadBuffer & istr, const size, settings.binary.max_binary_string_size); field = Map(); - Map & map = field.get(); + Map & map = field.safeGet(); map.reserve(size); for (size_t i = 0; i < size; ++i) { diff --git a/src/DataTypes/Serializations/SerializationNumber.cpp b/src/DataTypes/Serializations/SerializationNumber.cpp index bdb4dfc6735..bfc13af8ca3 100644 --- a/src/DataTypes/Serializations/SerializationNumber.cpp +++ b/src/DataTypes/Serializations/SerializationNumber.cpp @@ -169,7 +169,7 @@ template void SerializationNumber::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { /// ColumnVector::ValueType is a narrower type. For example, UInt8, when the Field type is UInt64 - typename ColumnVector::ValueType x = static_cast::ValueType>(field.get()); + typename ColumnVector::ValueType x = static_cast::ValueType>(field.safeGet()); writeBinaryLittleEndian(x, ostr); } diff --git a/src/DataTypes/Serializations/SerializationString.cpp b/src/DataTypes/Serializations/SerializationString.cpp index 9e523d0d745..ac5d4e3e128 100644 --- a/src/DataTypes/Serializations/SerializationString.cpp +++ b/src/DataTypes/Serializations/SerializationString.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes void SerializationString::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const String & s = field.get(); + const String & s = field.safeGet(); if (settings.binary.max_binary_string_size && s.size() > settings.binary.max_binary_string_size) throw Exception( ErrorCodes::TOO_LARGE_STRING_SIZE, @@ -59,7 +59,7 @@ void SerializationString::deserializeBinary(Field & field, ReadBuffer & istr, co settings.binary.max_binary_string_size); field = String(); - String & s = field.get(); + String & s = field.safeGet(); s.resize(size); istr.readStrict(s.data(), size); } diff --git a/src/DataTypes/Serializations/SerializationTuple.cpp b/src/DataTypes/Serializations/SerializationTuple.cpp index 7a5227ca752..594a23ab507 100644 --- a/src/DataTypes/Serializations/SerializationTuple.cpp +++ b/src/DataTypes/Serializations/SerializationTuple.cpp @@ -34,7 +34,7 @@ static inline const IColumn & extractElementColumn(const IColumn & column, size_ void SerializationTuple::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings & settings) const { - const auto & tuple = field.get(); + const auto & tuple = field.safeGet(); for (size_t element_index = 0; element_index < elems.size(); ++element_index) { const auto & serialization = elems[element_index]; @@ -47,7 +47,7 @@ void SerializationTuple::deserializeBinary(Field & field, ReadBuffer & istr, con const size_t size = elems.size(); field = Tuple(); - Tuple & tuple = field.get(); + Tuple & tuple = field.safeGet(); tuple.reserve(size); for (size_t i = 0; i < size; ++i) elems[i]->deserializeBinary(tuple.emplace_back(), istr, settings); diff --git a/src/DataTypes/Serializations/SerializationUUID.cpp b/src/DataTypes/Serializations/SerializationUUID.cpp index 58178a896dc..f18466ad8ad 100644 --- a/src/DataTypes/Serializations/SerializationUUID.cpp +++ b/src/DataTypes/Serializations/SerializationUUID.cpp @@ -137,7 +137,7 @@ bool 
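The SerializationString changes above keep the guard that the declared size is validated against max_binary_string_size before the destination buffer is resized, both when writing and when reading. A rough standalone sketch of that guard; the fixed 8-byte length prefix here replaces ClickHouse's varint encoding for brevity.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>

constexpr uint64_t MAX_BINARY_STRING_SIZE = 1ULL << 20;  // illustrative limit

void serializeString(const std::string & s, std::ostream & out)
{
    if (s.size() > MAX_BINARY_STRING_SIZE)
        throw std::runtime_error("Too large string for binary serialization");
    uint64_t size = s.size();
    out.write(reinterpret_cast<const char *>(&size), sizeof(size));
    out.write(s.data(), static_cast<std::streamsize>(s.size()));
}

std::string deserializeString(std::istream & in)
{
    uint64_t size = 0;
    in.read(reinterpret_cast<char *>(&size), sizeof(size));
    if (size > MAX_BINARY_STRING_SIZE)
        throw std::runtime_error("Too large size declared in binary string");  // reject before resize()
    std::string s(size, '\0');
    in.read(s.data(), static_cast<std::streamsize>(size));
    return s;
}

int main()
{
    std::stringstream buf;
    serializeString("hello", buf);
    std::cout << deserializeString(buf) << '\n';
}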
SerializationUUID::tryDeserializeTextCSV(IColumn & column, ReadBuffer & ist void SerializationUUID::serializeBinary(const Field & field, WriteBuffer & ostr, const FormatSettings &) const { - UUID x = field.get(); + UUID x = field.safeGet(); writeBinaryLittleEndian(x, ostr); } diff --git a/src/DataTypes/Serializations/SerializationVariantElement.cpp b/src/DataTypes/Serializations/SerializationVariantElement.cpp index 03b5d9584e0..36dc85f60ee 100644 --- a/src/DataTypes/Serializations/SerializationVariantElement.cpp +++ b/src/DataTypes/Serializations/SerializationVariantElement.cpp @@ -305,8 +305,10 @@ SerializationVariantElement::VariantSubcolumnCreator::VariantSubcolumnCreator( const String & variant_element_name_, ColumnVariant::Discriminator global_variant_discriminator_, ColumnVariant::Discriminator local_variant_discriminator_, - bool make_nullable_) + bool make_nullable_, + const ColumnPtr & null_map_) : local_discriminators(local_discriminators_) + , null_map(null_map_) , variant_element_name(variant_element_name_) , global_variant_discriminator(global_variant_discriminator_) , local_variant_discriminator(local_variant_discriminator_) @@ -314,12 +316,13 @@ SerializationVariantElement::VariantSubcolumnCreator::VariantSubcolumnCreator( { } -DataTypePtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB::DataTypePtr & prev) const + +DataTypePtr SerializationVariantElement::VariantSubcolumnCreator::create(const DataTypePtr & prev) const { return make_nullable ? makeNullableOrLowCardinalityNullableSafe(prev) : prev; } -SerializationPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB::SerializationPtr & prev) const +SerializationPtr SerializationVariantElement::VariantSubcolumnCreator::create(const SerializationPtr & prev) const { return std::make_shared(prev, variant_element_name, global_variant_discriminator); } @@ -339,12 +342,16 @@ ColumnPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB: return res; } - /// In general case we should iterate through discriminators and create null-map for our variant. - NullMap null_map; - null_map.reserve(local_discriminators->size()); - const auto & local_discriminators_data = assert_cast(*local_discriminators).getData(); - for (auto local_discr : local_discriminators_data) - null_map.push_back(local_discr != local_variant_discriminator); + /// In general case we should iterate through discriminators and create null-map for our variant if we don't already have it. + std::optional null_map_from_discriminators; + if (!null_map) + { + null_map_from_discriminators = NullMap(); + null_map_from_discriminators->reserve(local_discriminators->size()); + const auto & local_discriminators_data = assert_cast(*local_discriminators).getData(); + for (auto local_discr : local_discriminators_data) + null_map_from_discriminators->push_back(local_discr != local_variant_discriminator); + } /// Now we can create new column from null-map and variant column using IColumn::expand. 
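VariantSubcolumnCreator above now accepts an optional precomputed null map and only falls back to deriving one from the local discriminators (a row is NULL unless its discriminator equals this variant's). A standalone sketch of that fallback; the types and names are simplified.

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

using Discriminator = uint8_t;
using NullMap = std::vector<uint8_t>;  // 1 = NULL, as in ClickHouse null maps

// Build the variant element's null map from discriminators, but only when the caller
// didn't already provide one (the new optional null_map parameter in the patch).
NullMap getNullMap(const std::vector<Discriminator> & local_discriminators,
                   Discriminator this_variant,
                   const std::optional<NullMap> & precomputed)
{
    if (precomputed)
        return *precomputed;                        // reuse, skip the O(rows) scan

    NullMap null_map;
    null_map.reserve(local_discriminators.size());
    for (Discriminator d : local_discriminators)
        null_map.push_back(d != this_variant);      // row belongs to another variant -> NULL
    return null_map;
}

int main()
{
    NullMap m = getNullMap({0, 1, 0, 2}, 0, std::nullopt);
    for (uint8_t v : m) std::cout << int(v) << ' ';  // 0 1 0 1
    std::cout << '\n';
}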
auto res_column = IColumn::mutate(prev); @@ -356,13 +363,21 @@ ColumnPtr SerializationVariantElement::VariantSubcolumnCreator::create(const DB: if (make_nullable && prev->lowCardinality()) res_column = assert_cast(*res_column).cloneNullable(); - res_column->expand(null_map, /*inverted = */ true); + if (null_map_from_discriminators) + res_column->expand(*null_map_from_discriminators, /*inverted = */ true); + else + res_column->expand(assert_cast(*null_map).getData(), /*inverted = */ true); if (make_nullable && prev->canBeInsideNullable()) { - auto null_map_col = ColumnUInt8::create(); - null_map_col->getData() = std::move(null_map); - return ColumnNullable::create(std::move(res_column), std::move(null_map_col)); + if (null_map_from_discriminators) + { + auto null_map_col = ColumnUInt8::create(); + null_map_col->getData() = std::move(*null_map_from_discriminators); + return ColumnNullable::create(std::move(res_column), std::move(null_map_col)); + } + + return ColumnNullable::create(std::move(res_column), null_map->assumeMutable()); } return res_column; diff --git a/src/DataTypes/Serializations/SerializationVariantElement.h b/src/DataTypes/Serializations/SerializationVariantElement.h index 69101aea0f5..64f86eb2190 100644 --- a/src/DataTypes/Serializations/SerializationVariantElement.h +++ b/src/DataTypes/Serializations/SerializationVariantElement.h @@ -63,18 +63,22 @@ public: struct VariantSubcolumnCreator : public ISubcolumnCreator { + private: const ColumnPtr local_discriminators; + const ColumnPtr null_map; /// optional const String variant_element_name; const ColumnVariant::Discriminator global_variant_discriminator; const ColumnVariant::Discriminator local_variant_discriminator; bool make_nullable; + public: VariantSubcolumnCreator( const ColumnPtr & local_discriminators_, const String & variant_element_name_, ColumnVariant::Discriminator global_variant_discriminator_, ColumnVariant::Discriminator local_variant_discriminator_, - bool make_nullable_); + bool make_nullable_, + const ColumnPtr & null_map_ = nullptr); DataTypePtr create(const DataTypePtr & prev) const override; ColumnPtr create(const ColumnPtr & prev) const override; diff --git a/src/DataTypes/fuzzers/CMakeLists.txt b/src/DataTypes/fuzzers/CMakeLists.txt index bc640358673..e54ef0a860c 100644 --- a/src/DataTypes/fuzzers/CMakeLists.txt +++ b/src/DataTypes/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(data_type_deserialization_fuzzer data_type_deserialization_fuzzer.cpp ${SRCS}) -target_link_libraries(data_type_deserialization_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions) +target_link_libraries(data_type_deserialization_fuzzer PRIVATE clickhouse_functions clickhouse_aggregate_functions) diff --git a/src/DataTypes/registerDataTypeDateTime.cpp b/src/DataTypes/registerDataTypeDateTime.cpp index 84a52d4affb..9a632bd381b 100644 --- a/src/DataTypes/registerDataTypeDateTime.cpp +++ b/src/DataTypes/registerDataTypeDateTime.cpp @@ -55,7 +55,7 @@ getArgument(const ASTPtr & arguments, size_t argument_index, const char * argume } } - return argument->value.get(); + return argument->value.safeGet(); } static DataTypePtr create(const ASTPtr & arguments) diff --git a/src/Databases/DDLLoadingDependencyVisitor.cpp b/src/Databases/DDLLoadingDependencyVisitor.cpp index 67bce915168..b91aa84ecd3 100644 --- a/src/Databases/DDLLoadingDependencyVisitor.cpp +++ b/src/Databases/DDLLoadingDependencyVisitor.cpp @@ -183,7 +183,7 @@ void DDLLoadingDependencyVisitor::extractTableNameFromArgument(const ASTFunction 
if (name->value.getType() != Field::Types::String) return; - auto maybe_qualified_name = QualifiedTableName::tryParseFromString(name->value.get()); + auto maybe_qualified_name = QualifiedTableName::tryParseFromString(name->value.safeGet()); if (!maybe_qualified_name) return; @@ -194,7 +194,7 @@ void DDLLoadingDependencyVisitor::extractTableNameFromArgument(const ASTFunction if (literal->value.getType() != Field::Types::String) return; - auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.get()); + auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.safeGet()); /// Just return if name if invalid if (!maybe_qualified_name) return; diff --git a/src/Databases/DDLRenamingVisitor.cpp b/src/Databases/DDLRenamingVisitor.cpp index 38e100e2470..7556223b30e 100644 --- a/src/Databases/DDLRenamingVisitor.cpp +++ b/src/Databases/DDLRenamingVisitor.cpp @@ -180,7 +180,7 @@ namespace if (database_name_field && table_name_field) { - QualifiedTableName qualified_name{database_name_field->get(), table_name_field->get()}; + QualifiedTableName qualified_name{database_name_field->safeGet(), table_name_field->safeGet()}; if (!qualified_name.database.empty() && !qualified_name.table.empty()) { auto new_qualified_name = data.renaming_map.getNewTableName(qualified_name); @@ -207,7 +207,7 @@ namespace if (literal->value.getType() != Field::Types::String) return; - auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.get()); + auto maybe_qualified_name = QualifiedTableName::tryParseFromString(literal->value.safeGet()); /// Just return if name if invalid if (!maybe_qualified_name || maybe_qualified_name->database.empty() || maybe_qualified_name->table.empty()) return; @@ -247,7 +247,7 @@ namespace if (!literal || (literal->value.getType() != Field::Types::String)) return; - auto database_name = literal->value.get(); + auto database_name = literal->value.safeGet(); if (database_name.empty()) return; diff --git a/src/Databases/DatabaseReplicated.cpp b/src/Databases/DatabaseReplicated.cpp index fe00c1c60aa..85217ba5c04 100644 --- a/src/Databases/DatabaseReplicated.cpp +++ b/src/Databases/DatabaseReplicated.cpp @@ -833,8 +833,8 @@ void DatabaseReplicated::checkTableEngine(const ASTCreateQuery & query, ASTStora if (!arg1 || !arg2 || arg1->value.getType() != Field::Types::String || arg2->value.getType() != Field::Types::String) return; - String maybe_path = arg1->value.get(); - String maybe_replica = arg2->value.get(); + String maybe_path = arg1->value.safeGet(); + String maybe_replica = arg2->value.safeGet(); /// Looks like it's ReplicatedMergeTree with explicit zookeeper_path and replica_name arguments. /// Let's ensure that some macros are used. 
@@ -1153,8 +1153,7 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep query_context->setSetting("allow_experimental_object_type", 1); query_context->setSetting("allow_experimental_variant_type", 1); query_context->setSetting("allow_experimental_dynamic_type", 1); - query_context->setSetting("allow_experimental_annoy_index", 1); - query_context->setSetting("allow_experimental_usearch_index", 1); + query_context->setSetting("allow_experimental_vector_similarity_index", 1); query_context->setSetting("allow_experimental_bigint_types", 1); query_context->setSetting("allow_experimental_window_functions", 1); query_context->setSetting("allow_experimental_geo_types", 1); @@ -1584,6 +1583,8 @@ void DatabaseReplicated::dropTable(ContextPtr local_context, const String & tabl } auto table = tryGetTable(table_name, getContext()); + if (!table) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Table {} doesn't exist", table_name); if (table->getName() == "MaterializedView" || table->getName() == "WindowView") { /// Avoid recursive locking of metadata_mutex diff --git a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp index 04b4070d5af..1364e9ae2b2 100644 --- a/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp +++ b/src/Databases/MySQL/MaterializedMySQLSyncThread.cpp @@ -736,11 +736,11 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) - casted_column->insertValue(static_cast(value.template get())); + casted_column->insertValue(static_cast(value.template safeGet())); } }; @@ -776,17 +776,17 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) { if (value.getType() == Field::Types::UInt64) - casted_int32_column->insertValue(static_cast(value.get())); + casted_int32_column->insertValue(static_cast(value.safeGet())); else if (value.getType() == Field::Types::Int64) { /// For MYSQL_TYPE_INT24 - const Int32 & num = static_cast(value.get()); + const Int32 & num = static_cast(value.safeGet()); casted_int32_column->insertValue(num & 0x800000 ? 
num | 0xFF000000 : num); } else @@ -798,7 +798,7 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) @@ -812,12 +812,12 @@ static void writeFieldsToColumn( { for (size_t index = 0; index < rows_data.size(); ++index) { - const Tuple & row_data = rows_data[index].get(); + const Tuple & row_data = rows_data[index].safeGet(); const Field & value = row_data[column_index]; if (write_data_to_null_map(value, index)) { - const String & data = value.get(); + const String & data = value.safeGet(); casted_fixed_string_column->insertData(data.data(), data.size()); } } @@ -864,7 +864,7 @@ static inline size_t onUpdateData(const Row & rows_data, Block & buffer, size_t { writeable_rows_mask[index + 1] = true; writeable_rows_mask[index] = differenceSortingKeys( - rows_data[index].get(), rows_data[index + 1].get(), sorting_columns_index); + rows_data[index].safeGet(), rows_data[index + 1].safeGet(), sorting_columns_index); } for (size_t column = 0; column < buffer.columns() - 2; ++column) diff --git a/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp b/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp index 11299c5b8b1..6f1ba26ee33 100644 --- a/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp +++ b/src/Databases/MySQL/tests/gtest_mysql_binlog.cpp @@ -281,12 +281,12 @@ static void testFile1(IBinlog & binlog, UInt64 timeout, bool filtered = false) ASSERT_EQ(write_event->table, "a"); ASSERT_EQ(write_event->rows.size(), 1); ASSERT_EQ(write_event->rows[0].getType(), Field::Types::Tuple); - auto row_data = write_event->rows[0].get(); + auto row_data = write_event->rows[0].safeGet(); ASSERT_EQ(row_data.size(), 4u); - ASSERT_EQ(row_data[0].get(), 1u); - ASSERT_EQ(row_data[1].get(), 1u); - ASSERT_EQ(row_data[2].get(), 1u); - ASSERT_EQ(row_data[3].get(), 1u); + ASSERT_EQ(row_data[0].safeGet(), 1u); + ASSERT_EQ(row_data[1].safeGet(), 1u); + ASSERT_EQ(row_data[2].safeGet(), 1u); + ASSERT_EQ(row_data[3].safeGet(), 1u); ASSERT_TRUE(binlog.tryReadEvent(event, timeout)); ++count; @@ -342,18 +342,18 @@ static void testFile1(IBinlog & binlog, UInt64 timeout, bool filtered = false) ASSERT_EQ(update_event->table, "a"); ASSERT_EQ(update_event->rows.size(), 2); ASSERT_EQ(update_event->rows[0].getType(), Field::Types::Tuple); - row_data = update_event->rows[0].get(); + row_data = update_event->rows[0].safeGet(); ASSERT_EQ(row_data.size(), 4u); - ASSERT_EQ(row_data[0].get(), 1u); - ASSERT_EQ(row_data[1].get(), 1u); - ASSERT_EQ(row_data[2].get(), 1u); - ASSERT_EQ(row_data[3].get(), 1u); - row_data = update_event->rows[1].get(); + ASSERT_EQ(row_data[0].safeGet(), 1u); + ASSERT_EQ(row_data[1].safeGet(), 1u); + ASSERT_EQ(row_data[2].safeGet(), 1u); + ASSERT_EQ(row_data[3].safeGet(), 1u); + row_data = update_event->rows[1].safeGet(); ASSERT_EQ(row_data.size(), 4u); - ASSERT_EQ(row_data[0].get(), 1u); - ASSERT_EQ(row_data[1].get(), 2u); - ASSERT_EQ(row_data[2].get(), 1u); - ASSERT_EQ(row_data[3].get(), 1u); + ASSERT_EQ(row_data[0].safeGet(), 1u); + ASSERT_EQ(row_data[1].safeGet(), 2u); + ASSERT_EQ(row_data[2].safeGet(), 1u); + ASSERT_EQ(row_data[3].safeGet(), 1u); ASSERT_TRUE(binlog.tryReadEvent(event, timeout)); ++count; diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp index 943f3ae502e..b9fd9c325f8 100644 --- 
a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.cpp @@ -196,7 +196,7 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( } else { - std::tuple row; + std::tuple row; while (stream >> row) { const auto column_name = std::get<0>(row); @@ -206,13 +206,14 @@ PostgreSQLTableStructure::ColumnsInfoPtr readNamesAndTypesList( std::get<3>(row)); columns.push_back(NameAndTypePair(column_name, data_type)); - auto attgenerated = std::get<6>(row); + auto attgenerated = std::get<7>(row); attributes.emplace( column_name, PostgreSQLTableStructure::PGAttribute{ .atttypid = parse(std::get<4>(row)), .atttypmod = parse(std::get<5>(row)), + .attnum = parse(std::get<6>(row)), .atthasdef = false, .attgenerated = attgenerated.empty() ? char{} : char(attgenerated[0]), .attr_def = {} @@ -308,6 +309,7 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "attndims AS dims, " /// array dimensions "atttypid as type_id, " "atttypmod as type_modifier, " + "attnum as att_num, " "attgenerated as generated " /// if column has GENERATED "FROM pg_attribute " "WHERE attrelid = (SELECT oid FROM pg_class WHERE {}) " @@ -338,17 +340,29 @@ PostgreSQLTableStructure fetchPostgreSQLTableStructure( "WHERE adrelid = (SELECT oid FROM pg_class WHERE {});", where); pqxx::result result{tx.exec(attrdef_query)}; - for (const auto row : result) + if (static_cast(result.size()) > table.physical_columns->names.size()) { - size_t adnum = row[0].as(); - if (!adnum || adnum > table.physical_columns->names.size()) + throw Exception(ErrorCodes::LOGICAL_ERROR, + "Received {} attrdef, but currently fetched columns list has {} columns", + result.size(), table.physical_columns->attributes.size()); + } + + for (const auto & column_attrs : table.physical_columns->attributes) + { + if (column_attrs.second.attgenerated != 's') /// e.g. 
not a generated column { - throw Exception(ErrorCodes::LOGICAL_ERROR, - "Received adnum {}, but currently fetched columns list has {} columns", - adnum, table.physical_columns->attributes.size()); + continue; + } + + for (const auto row : result) + { + int adnum = row[0].as(); + if (column_attrs.second.attnum == adnum) + { + table.physical_columns->attributes.at(column_attrs.first).attr_def = row[1].as(); + break; + } } - const auto column_name = table.physical_columns->names[adnum - 1]; - table.physical_columns->attributes.at(column_name).attr_def = row[1].as(); } } diff --git a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h index 81bf7b278fc..25ece6909fd 100644 --- a/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h +++ b/src/Databases/PostgreSQL/fetchPostgreSQLTableStructure.h @@ -16,6 +16,7 @@ struct PostgreSQLTableStructure { Int32 atttypid; Int32 atttypmod; + Int32 attnum; bool atthasdef; char attgenerated; std::string attr_def; diff --git a/src/Dictionaries/CacheDictionaryStorage.h b/src/Dictionaries/CacheDictionaryStorage.h index 47f99bd1093..781822533e9 100644 --- a/src/Dictionaries/CacheDictionaryStorage.h +++ b/src/Dictionaries/CacheDictionaryStorage.h @@ -395,13 +395,13 @@ private: } else if constexpr (std::is_same_v) { - const String & string_value = column_value.get(); + const String & string_value = column_value.safeGet(); StringRef inserted_value = copyStringInArena(arena, string_value); container.back() = inserted_value; } else { - container.back() = static_cast(column_value.get()); + container.back() = static_cast(column_value.safeGet()); } }); } @@ -441,7 +441,7 @@ private: } else if constexpr (std::is_same_v) { - const String & string_value = column_value.get(); + const String & string_value = column_value.safeGet(); StringRef inserted_value = copyStringInArena(arena, string_value); if (!cell_was_default) @@ -454,7 +454,7 @@ private: } else { - container[index_to_use] = static_cast(column_value.get()); + container[index_to_use] = static_cast(column_value.safeGet()); } }); } @@ -651,12 +651,12 @@ private: } else if constexpr (std::is_same_v) { - auto & value = default_value.get(); + auto & value = default_value.safeGet(); value_setter(value); } else { - value_setter(default_value.get()); + value_setter(default_value.safeGet()); } } else diff --git a/src/Dictionaries/ClickHouseDictionarySource.cpp b/src/Dictionaries/ClickHouseDictionarySource.cpp index bf16f315ddf..b36d53a6159 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.cpp +++ b/src/Dictionaries/ClickHouseDictionarySource.cpp @@ -51,6 +51,8 @@ namespace configuration.db, configuration.user, configuration.password, + configuration.proto_send_chunked, + configuration.proto_recv_chunked, configuration.quota_key, "", /* cluster */ "", /* cluster_secret */ @@ -222,7 +224,7 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) { validateNamedCollection( *named_collection, {}, ValidateKeysMultiset{ - "secure", "host", "hostname", "port", "user", "username", "password", "quota_key", "name", + "secure", "host", "hostname", "port", "user", "username", "password", "proto_send_chunked", "proto_recv_chunked", "quota_key", "name", "db", "database", "table","query", "where", "invalidate_query", "update_field", "update_lag"}); const auto secure = named_collection->getOrDefault("secure", false); @@ -234,6 +236,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = 
named_collection->getAnyOrDefault({"user", "username"}, "default"), .password = named_collection->getOrDefault("password", ""), + .proto_send_chunked = named_collection->getOrDefault("proto_send_chunked", "notchunked"), + .proto_recv_chunked = named_collection->getOrDefault("proto_recv_chunked", "notchunked"), .quota_key = named_collection->getOrDefault("quota_key", ""), .db = named_collection->getAnyOrDefault({"db", "database"}, default_database), .table = named_collection->getOrDefault("table", ""), @@ -258,6 +262,8 @@ void registerDictionarySourceClickHouse(DictionarySourceFactory & factory) .host = host, .user = config.getString(settings_config_prefix + ".user", "default"), .password = config.getString(settings_config_prefix + ".password", ""), + .proto_send_chunked = config.getString(settings_config_prefix + ".proto_caps.send", "notchunked"), + .proto_recv_chunked = config.getString(settings_config_prefix + ".proto_caps.recv", "notchunked"), .quota_key = config.getString(settings_config_prefix + ".quota_key", ""), .db = config.getString(settings_config_prefix + ".db", default_database), .table = config.getString(settings_config_prefix + ".table", ""), diff --git a/src/Dictionaries/ClickHouseDictionarySource.h b/src/Dictionaries/ClickHouseDictionarySource.h index 3357514eab2..faf9e5f8009 100644 --- a/src/Dictionaries/ClickHouseDictionarySource.h +++ b/src/Dictionaries/ClickHouseDictionarySource.h @@ -23,6 +23,8 @@ public: const std::string host; const std::string user; const std::string password; + const std::string proto_send_chunked; + const std::string proto_recv_chunked; const std::string quota_key; const std::string db; const std::string table; diff --git a/src/Dictionaries/DictionaryHelpers.h b/src/Dictionaries/DictionaryHelpers.h index 64fc05e99ab..43fd39640c3 100644 --- a/src/Dictionaries/DictionaryHelpers.h +++ b/src/Dictionaries/DictionaryHelpers.h @@ -345,7 +345,7 @@ public: if (attribute_default_value.isNull()) default_value_is_null = true; else - default_value = static_cast(attribute_default_value.get()); + default_value = static_cast(attribute_default_value.safeGet()); } else { @@ -377,7 +377,7 @@ public: if constexpr (std::is_same_v) { Field field = (*default_values_column)[row]; - return field.get(); + return field.safeGet(); } else if constexpr (std::is_same_v) return default_values_column->getDataAt(row); diff --git a/src/Dictionaries/FlatDictionary.cpp b/src/Dictionaries/FlatDictionary.cpp index 999160226d9..b0233766741 100644 --- a/src/Dictionaries/FlatDictionary.cpp +++ b/src/Dictionaries/FlatDictionary.cpp @@ -245,7 +245,7 @@ ColumnPtr FlatDictionary::getHierarchy(ColumnPtr key_column, const DataTypePtr & std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const ContainerType & parent_keys = std::get>(hierarchical_attribute.container); @@ -300,7 +300,7 @@ ColumnUInt8::Ptr FlatDictionary::isInHierarchy( std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const ContainerType & parent_keys = std::get>(hierarchical_attribute.container); @@ -701,7 +701,7 @@ void FlatDictionary::setAttributeValue(Attribute & attribute, const UInt64 key, return; } - auto & attribute_value = value.get(); + auto & attribute_value = value.safeGet(); auto & container = std::get>(attribute.container); loaded_keys[key] = true; diff 
--git a/src/Dictionaries/HashedArrayDictionary.cpp b/src/Dictionaries/HashedArrayDictionary.cpp index d7d50dfb0a6..8768be8e5ec 100644 --- a/src/Dictionaries/HashedArrayDictionary.cpp +++ b/src/Dictionaries/HashedArrayDictionary.cpp @@ -240,7 +240,7 @@ ColumnPtr HashedArrayDictionary::getHierarchy(Colu std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); auto is_key_valid_func = [&, this](auto & key) @@ -313,7 +313,7 @@ ColumnUInt8::Ptr HashedArrayDictionary::isInHierar std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); auto is_key_valid_func = [&](auto & key) @@ -581,13 +581,13 @@ void HashedArrayDictionary::blockToAttributes(cons if constexpr (std::is_same_v) { - String & value_to_insert = column_value_to_insert.get(); + String & value_to_insert = column_value_to_insert.safeGet(); StringRef string_in_arena_reference = copyStringInArena(*string_arenas[shard], value_to_insert); attribute_container.back() = string_in_arena_reference; } else { - auto value_to_insert = static_cast(column_value_to_insert.get()); + auto value_to_insert = static_cast(column_value_to_insert.safeGet()); attribute_container.back() = value_to_insert; } }; diff --git a/src/Dictionaries/HashedDictionary.h b/src/Dictionaries/HashedDictionary.h index 3a2b61e5149..7e935fe4855 100644 --- a/src/Dictionaries/HashedDictionary.h +++ b/src/Dictionaries/HashedDictionary.h @@ -636,7 +636,7 @@ ColumnPtr HashedDictionary::getHierarchy(C std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const CollectionsHolder & child_key_to_parent_key_maps = std::get>(hierarchical_attribute.containers); @@ -710,7 +710,7 @@ ColumnUInt8::Ptr HashedDictionary::isInHie std::optional null_value; if (!dictionary_attribute.null_value.isNull()) - null_value = dictionary_attribute.null_value.get(); + null_value = dictionary_attribute.null_value.safeGet(); const CollectionsHolder & child_key_to_parent_key_maps = std::get>(hierarchical_attribute.containers); @@ -1004,13 +1004,13 @@ void HashedDictionary::blockToAttributes(c if constexpr (std::is_same_v) { - String & value_to_insert = column_value_to_insert.get(); + String & value_to_insert = column_value_to_insert.safeGet(); StringRef arena_value = copyStringInArena(*string_arenas[shard], value_to_insert); container.insert({key, arena_value}); } else { - auto value_to_insert = static_cast(column_value_to_insert.get()); + auto value_to_insert = static_cast(column_value_to_insert.safeGet()); container.insert({key, value_to_insert}); } diff --git a/src/Dictionaries/HierarchyDictionariesUtils.cpp b/src/Dictionaries/HierarchyDictionariesUtils.cpp index e1119982a34..de532ade26d 100644 --- a/src/Dictionaries/HierarchyDictionariesUtils.cpp +++ b/src/Dictionaries/HierarchyDictionariesUtils.cpp @@ -50,7 +50,7 @@ namespace std::optional null_value; if (!hierarchical_attribute.null_value.isNull()) - null_value = hierarchical_attribute.null_value.get(); + null_value = hierarchical_attribute.null_value.safeGet(); ColumnPtr key_to_request_column = ColumnVector::create(); auto * key_to_request_column_typed = static_cast *>(key_to_request_column->assumeMutable().get()); @@ -190,7 +190,7 @@ ColumnPtr 
getKeysHierarchyDefaultImplementation( std::optional null_value; if (!hierarchical_attribute.null_value.isNull()) - null_value = hierarchical_attribute.null_value.get(); + null_value = hierarchical_attribute.null_value.safeGet(); auto get_parent_key_func = [&](auto & key) { @@ -252,7 +252,7 @@ ColumnUInt8::Ptr getKeysIsInHierarchyDefaultImplementation( std::optional null_value; if (!hierarchical_attribute.null_value.isNull()) - null_value = hierarchical_attribute.null_value.get(); + null_value = hierarchical_attribute.null_value.safeGet(); auto get_parent_key_func = [&](auto & key) { diff --git a/src/Dictionaries/IPAddressDictionary.cpp b/src/Dictionaries/IPAddressDictionary.cpp index 41fafcc162b..4f9e991752f 100644 --- a/src/Dictionaries/IPAddressDictionary.cpp +++ b/src/Dictionaries/IPAddressDictionary.cpp @@ -613,14 +613,14 @@ void IPAddressDictionary::calculateBytesAllocated() template void IPAddressDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value) { - attribute.null_values = null_value.isNull() ? T{} : T(null_value.get()); + attribute.null_values = null_value.isNull() ? T{} : T(null_value.safeGet()); attribute.maps.emplace>(); } template <> void IPAddressDictionary::createAttributeImpl(Attribute & attribute, const Field & null_value) { - attribute.null_values = null_value.isNull() ? String() : null_value.get(); + attribute.null_values = null_value.isNull() ? String() : null_value.safeGet(); attribute.maps.emplace>(); attribute.string_arena = std::make_unique(); } @@ -976,13 +976,13 @@ void IPAddressDictionary::setAttributeValue(Attribute & attribute, const Field & if constexpr (std::is_same_v) { - const auto & string = value.get(); + const auto & string = value.safeGet(); const auto * string_in_arena = attribute.string_arena->insert(string.data(), string.size()); setAttributeValueImpl(attribute, StringRef{string_in_arena, string.size()}); } else { - setAttributeValueImpl(attribute, static_cast(value.get())); + setAttributeValueImpl(attribute, static_cast(value.safeGet())); } }; diff --git a/src/Dictionaries/MongoDBDictionarySource.cpp b/src/Dictionaries/MongoDBDictionarySource.cpp index 46910fa9f6a..c30a6f90e44 100644 --- a/src/Dictionaries/MongoDBDictionarySource.cpp +++ b/src/Dictionaries/MongoDBDictionarySource.cpp @@ -1,7 +1,6 @@ #include "MongoDBDictionarySource.h" #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" -#include "registerDictionaries.h" #include #include @@ -233,7 +232,7 @@ QueryPipeline MongoDBDictionarySource::loadKeys(const Columns & key_columns, con } case AttributeUnderlyingType::String: { - String loaded_str((*key_columns[attribute_index])[row_idx].get()); + String loaded_str((*key_columns[attribute_index])[row_idx].safeGet()); /// Convert string to ObjectID if (key_attribute.is_object_id) { diff --git a/src/Dictionaries/PolygonDictionary.cpp b/src/Dictionaries/PolygonDictionary.cpp index dfc920623e3..ff29ca1f6b8 100644 --- a/src/Dictionaries/PolygonDictionary.cpp +++ b/src/Dictionaries/PolygonDictionary.cpp @@ -141,7 +141,7 @@ ColumnPtr IPolygonDictionary::getColumn( { getItemsShortCircuitImpl( requested_key_points, - [&](size_t row) { return (*attribute_values_column)[row].get(); }, + [&](size_t row) { return (*attribute_values_column)[row].safeGet(); }, [&](Array & value) { result_column_typed.insert(value); }, default_mask.value()); } @@ -149,7 +149,7 @@ ColumnPtr IPolygonDictionary::getColumn( { getItemsImpl( requested_key_points, - [&](size_t row) { return (*attribute_values_column)[row].get(); }, + 
[&](size_t row) { return (*attribute_values_column)[row].safeGet(); }, [&](Array & value) { result_column_typed.insert(value); }, default_value_provider.value()); } @@ -432,16 +432,16 @@ void IPolygonDictionary::getItemsImpl( } else if constexpr (std::is_same_v) { - set_value(default_value.get()); + set_value(default_value.safeGet()); } else if constexpr (std::is_same_v) { - auto default_value_string = default_value.get(); + auto default_value_string = default_value.safeGet(); set_value(default_value_string); } else { - set_value(default_value.get>()); + set_value(default_value.safeGet>()); } } } diff --git a/src/Dictionaries/RangeHashedDictionary.h b/src/Dictionaries/RangeHashedDictionary.h index fc6c98990d0..c264b480bcb 100644 --- a/src/Dictionaries/RangeHashedDictionary.h +++ b/src/Dictionaries/RangeHashedDictionary.h @@ -906,13 +906,13 @@ void RangeHashedDictionary::setAttributeValue(Attribute & a if constexpr (std::is_same_v) { - const auto & string = value.get(); + const auto & string = value.safeGet(); StringRef string_ref = copyStringInArena(string_arena, string); value_to_insert = string_ref; } else { - value_to_insert = static_cast(value.get()); + value_to_insert = static_cast(value.safeGet()); } container.back() = value_to_insert; diff --git a/src/Dictionaries/RedisDictionarySource.cpp b/src/Dictionaries/RedisDictionarySource.cpp index 1736cdff306..9db639a0ca4 100644 --- a/src/Dictionaries/RedisDictionarySource.cpp +++ b/src/Dictionaries/RedisDictionarySource.cpp @@ -1,7 +1,6 @@ #include "RedisDictionarySource.h" #include "DictionarySourceFactory.h" #include "DictionaryStructure.h" -#include "registerDictionaries.h" #include #include @@ -160,7 +159,7 @@ namespace DB if (isInteger(type)) key << DB::toString(key_columns[i]->get64(row)); else if (isString(type)) - key << (*key_columns[i])[row].get(); + key << (*key_columns[i])[row].safeGet(); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Unexpected type of key in Redis dictionary"); } diff --git a/src/Disks/DiskFomAST.cpp b/src/Disks/DiskFomAST.cpp index b2f1280c507..5329ff8748a 100644 --- a/src/Disks/DiskFomAST.cpp +++ b/src/Disks/DiskFomAST.cpp @@ -132,7 +132,7 @@ std::string DiskFomAST::createCustomDisk(const ASTPtr & disk_function_ast, Conte FlattenDiskConfigurationVisitor::Data data{context, attach}; FlattenDiskConfigurationVisitor{data}.visit(ast); - return assert_cast(*ast).value.get(); + return assert_cast(*ast).value.safeGet(); } void DiskFomAST::ensureDiskIsNotCustom(const std::string & disk_name, ContextPtr context) diff --git a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp index 433a0e96d2e..7205b5b3294 100644 --- a/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp +++ b/src/Disks/ObjectStorages/S3/S3ObjectStorage.cpp @@ -305,7 +305,8 @@ void S3ObjectStorage::listObjects(const std::string & path, RelativePathsWithMet S3::ListObjectsV2Request request; request.SetBucket(uri.bucket); - request.SetPrefix(path); + if (path != "/") + request.SetPrefix(path); if (max_keys) request.SetMaxKeys(static_cast(max_keys)); else diff --git a/src/Formats/JSONExtractTree.cpp b/src/Formats/JSONExtractTree.cpp index 242d2dc9f80..86fde3852b8 100644 --- a/src/Formats/JSONExtractTree.cpp +++ b/src/Formats/JSONExtractTree.cpp @@ -1362,13 +1362,14 @@ public: } auto & variant_column = column_dynamic.getVariantColumn(); - auto variant_info = column_dynamic.getVariantInfo(); + const auto & variant_info = column_dynamic.getVariantInfo(); /// Second, infer ClickHouse type for this element and add 
it as a new variant. auto element_type = elementToDataType(element, format_settings); - if (column_dynamic.addNewVariant(element_type)) + auto element_type_name = element_type->getName(); + if (column_dynamic.addNewVariant(element_type, element_type_name)) { auto node = buildJSONExtractTree(element_type, "Dynamic inference"); - auto global_discriminator = variant_info.variant_name_to_discriminator[element_type->getName()]; + auto global_discriminator = variant_info.variant_name_to_discriminator.at(element_type_name); auto & variant = variant_column.getVariantByGlobalDiscriminator(global_discriminator); if (!node->insertResultToColumn(variant, element, insert_settings, format_settings, error)) return false; @@ -1377,29 +1378,15 @@ public: return true; } - /// We couldn't add new variant. Try to insert element into current variants. - auto variant_node = buildJSONExtractTree(variant_info.variant_type, "Dynamic inference"); - if (variant_node->insertResultToColumn(variant_column, element, insert_settings, format_settings, error)) - return true; - - /// We couldn't insert element into any existing variant, add String variant and read value as String. - column_dynamic.addStringVariant(); - auto string_global_discriminator = variant_info.variant_name_to_discriminator["String"]; - auto & string_column = variant_column.getVariantByGlobalDiscriminator(string_global_discriminator); - if (!getStringNode()->insertResultToColumn(string_column, element, insert_settings, format_settings, error)) + /// We couldn't add this variant, insert it into shared variant. + auto tmp_variant_column = element_type->createColumn(); + auto node = buildJSONExtractTree(element_type, "Dynamic inference"); + if (!node->insertResultToColumn(*tmp_variant_column, element, insert_settings, format_settings, error)) return false; - variant_column.getLocalDiscriminators().push_back(variant_column.localDiscriminatorByGlobal(string_global_discriminator)); - variant_column.getOffsets().push_back(string_column.size() - 1); + column_dynamic.insertValueIntoSharedVariant(*tmp_variant_column, element_type, element_type_name, 0); return true; } - static const std::unique_ptr> & getStringNode() - { - static const std::unique_ptr> string_node - = buildJSONExtractTree(std::make_shared(), "Dynamic inference"); - return string_node; - } - static DataTypePtr elementToDataType(const typename JSONParser::Element & element, const FormatSettings & format_settings) { JSONInferenceInfo json_inference_info; diff --git a/src/Formats/ReadSchemaUtils.cpp b/src/Formats/ReadSchemaUtils.cpp index 617595c19a2..dbbd728ed72 100644 --- a/src/Formats/ReadSchemaUtils.cpp +++ b/src/Formats/ReadSchemaUtils.cpp @@ -164,7 +164,7 @@ try return {*iterator_data.cached_columns, *format_name}; } - schemas_for_union_mode.emplace_back(iterator_data.cached_columns->getAll(), read_buffer_iterator.getLastFileName()); + schemas_for_union_mode.emplace_back(iterator_data.cached_columns->getAll(), read_buffer_iterator.getLastFilePath()); continue; } @@ -250,7 +250,7 @@ try if (!names_and_types.empty()) read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types)); - schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName()); + schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFilePath()); } catch (...) { @@ -411,7 +411,7 @@ try throw Exception(ErrorCodes::CANNOT_DETECT_FORMAT, "The data format cannot be detected by the contents of the files. 
You can specify the format manually"); read_buffer_iterator.setSchemaToLastFile(ColumnsDescription(names_and_types)); - schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFileName()); + schemas_for_union_mode.emplace_back(names_and_types, read_buffer_iterator.getLastFilePath()); } if (format_name && mode == SchemaInferenceMode::DEFAULT) @@ -527,9 +527,9 @@ try } catch (Exception & e) { - auto file_name = read_buffer_iterator.getLastFileName(); - if (!file_name.empty()) - e.addMessage(fmt::format("(in file/uri {})", file_name)); + auto file_path = read_buffer_iterator.getLastFilePath(); + if (!file_path.empty()) + e.addMessage(fmt::format("(in file/uri {})", file_path)); throw; } diff --git a/src/Formats/ReadSchemaUtils.h b/src/Formats/ReadSchemaUtils.h index bb5e068f696..7168e7f0817 100644 --- a/src/Formats/ReadSchemaUtils.h +++ b/src/Formats/ReadSchemaUtils.h @@ -56,8 +56,8 @@ struct IReadBufferIterator /// Set auto detected format name. virtual void setFormatName(const String & /*format_name*/) {} - /// Get last processed file name for better exception messages. - virtual String getLastFileName() const { return ""; } + /// Get last processed file path for better exception messages. + virtual String getLastFilePath() const { return ""; } /// Return true if method recreateLastReadBuffer is implemented. virtual bool supportsLastReadBufferRecreation() const { return false; } diff --git a/src/Formats/fuzzers/CMakeLists.txt b/src/Formats/fuzzers/CMakeLists.txt index 99e57bfbca3..b8a7e78b6e2 100644 --- a/src/Formats/fuzzers/CMakeLists.txt +++ b/src/Formats/fuzzers/CMakeLists.txt @@ -1,2 +1,2 @@ clickhouse_add_executable(format_fuzzer format_fuzzer.cpp ${SRCS}) -target_link_libraries(format_fuzzer PRIVATE dbms clickhouse_aggregate_functions clickhouse_functions) +target_link_libraries(format_fuzzer PRIVATE clickhouse_functions clickhouse_aggregate_functions) diff --git a/src/Formats/fuzzers/format_fuzzer.cpp b/src/Formats/fuzzers/format_fuzzer.cpp index 408e7218221..12cd40f9442 100644 --- a/src/Formats/fuzzers/format_fuzzer.cpp +++ b/src/Formats/fuzzers/format_fuzzer.cpp @@ -3,7 +3,6 @@ #include #include -#include #include #include diff --git a/src/Functions/CastOverloadResolver.cpp b/src/Functions/CastOverloadResolver.cpp index 49f63073aaf..6cb4d492fd8 100644 --- a/src/Functions/CastOverloadResolver.cpp +++ b/src/Functions/CastOverloadResolver.cpp @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -35,7 +36,7 @@ FunctionBasePtr createFunctionBaseCast( class CastOverloadResolverImpl : public IFunctionOverloadResolver { public: - const char * getNameImpl() const + static const char * getNameImpl(CastType cast_type, bool internal) { if (cast_type == CastType::accurate) return "accurateCast"; @@ -49,7 +50,7 @@ public: String getName() const override { - return getNameImpl(); + return getNameImpl(cast_type, internal); } size_t getNumberOfArguments() const override { return 2; } @@ -79,10 +80,22 @@ public: } } + static FunctionBasePtr createInternalCast(ColumnWithTypeAndName from, DataTypePtr to, CastType cast_type, std::optional diagnostic) + { + if (cast_type == CastType::accurateOrNull && !isVariant(to)) + to = makeNullable(to); + + ColumnsWithTypeAndName arguments; + arguments.emplace_back(std::move(from)); + arguments.emplace_back().type = std::make_unique(); + + return createFunctionBaseCast(nullptr, getNameImpl(cast_type, true), arguments, to, diagnostic, cast_type); + } + protected: FunctionBasePtr buildImpl(const 
ColumnsWithTypeAndName & arguments, const DataTypePtr & return_type) const override { - return createFunctionBaseCast(context, getNameImpl(), arguments, return_type, diagnostic, cast_type); + return createFunctionBaseCast(context, getNameImpl(cast_type, internal), arguments, return_type, diagnostic, cast_type); } DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override @@ -130,9 +143,9 @@ private: }; -FunctionOverloadResolverPtr createInternalCastOverloadResolver(CastType type, std::optional diagnostic) +FunctionBasePtr createInternalCast(ColumnWithTypeAndName from, DataTypePtr to, CastType cast_type, std::optional diagnostic) { - return CastOverloadResolverImpl::create(ContextPtr{}, type, true, diagnostic); + return CastOverloadResolverImpl::createInternalCast(std::move(from), std::move(to), cast_type, std::move(diagnostic)); } REGISTER_FUNCTION(CastOverloadResolvers) diff --git a/src/Functions/CastOverloadResolver.h b/src/Functions/CastOverloadResolver.h index 7d98f774812..66f9d6cfcaf 100644 --- a/src/Functions/CastOverloadResolver.h +++ b/src/Functions/CastOverloadResolver.h @@ -3,6 +3,7 @@ #include #include #include +#include namespace DB @@ -11,6 +12,9 @@ namespace DB class IFunctionOverloadResolver; using FunctionOverloadResolverPtr = std::shared_ptr; +class IFunctionBase; +using FunctionBasePtr = std::shared_ptr; + enum class CastType : uint8_t { nonAccurate, @@ -24,6 +28,6 @@ struct CastDiagnostic std::string column_to; }; -FunctionOverloadResolverPtr createInternalCastOverloadResolver(CastType type, std::optional diagnostic); +FunctionBasePtr createInternalCast(ColumnWithTypeAndName from, DataTypePtr to, CastType cast_type, std::optional diagnostic); } diff --git a/src/Functions/DateTimeTransforms.h b/src/Functions/DateTimeTransforms.h index 6f0bdf406db..dfb4b76e5e2 100644 --- a/src/Functions/DateTimeTransforms.h +++ b/src/Functions/DateTimeTransforms.h @@ -1198,7 +1198,7 @@ struct ToYearImpl { if (point.getType() != Field::Types::UInt64) return std::nullopt; - auto year = point.get(); + auto year = point.safeGet(); if (year < DATE_LUT_MIN_YEAR || year >= DATE_LUT_MAX_YEAR) return std::nullopt; const DateLUTImpl & date_lut = DateLUT::instance("UTC"); @@ -2003,7 +2003,7 @@ struct ToYYYYMMImpl { if (point.getType() != Field::Types::UInt64) return std::nullopt; - auto year_month = point.get(); + auto year_month = point.safeGet(); auto year = year_month / 100; auto month = year_month % 100; diff --git a/src/Functions/FunctionsConsistentHashing.h b/src/Functions/FunctionsConsistentHashing.h index 306b6395dc5..210bb69e16d 100644 --- a/src/Functions/FunctionsConsistentHashing.h +++ b/src/Functions/FunctionsConsistentHashing.h @@ -101,9 +101,9 @@ private: BucketsType num_buckets; if (buckets_field.getType() == Field::Types::Int64) - num_buckets = checkBucketsRange(buckets_field.get()); + num_buckets = checkBucketsRange(buckets_field.safeGet()); else if (buckets_field.getType() == Field::Types::UInt64) - num_buckets = checkBucketsRange(buckets_field.get()); + num_buckets = checkBucketsRange(buckets_field.safeGet()); else throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of the second argument of function {}", diff --git a/src/Functions/FunctionsConversion.cpp b/src/Functions/FunctionsConversion.cpp index 675283d011e..0f54898177b 100644 --- a/src/Functions/FunctionsConversion.cpp +++ b/src/Functions/FunctionsConversion.cpp @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -117,7 +118,7 @@ UInt32 
extractToDecimalScale(const ColumnWithTypeAndName & named_column) Field field; named_column.column->get(0, field); - return static_cast(field.get()); + return static_cast(field.safeGet()); } @@ -2604,8 +2605,8 @@ struct ToNumberMonotonicity if (left.isNull() || right.isNull()) return {}; - Float64 left_float = left.get(); - Float64 right_float = right.get(); + Float64 left_float = left.safeGet(); + Float64 right_float = right.safeGet(); if (left_float >= static_cast(std::numeric_limits::min()) && left_float <= static_cast(std::numeric_limits::max()) @@ -2633,11 +2634,11 @@ struct ToNumberMonotonicity const bool left_in_first_half = left.isNull() ? from_is_unsigned - : (left.get() >= 0); + : (left.safeGet() >= 0); const bool right_in_first_half = right.isNull() ? !from_is_unsigned - : (right.get() >= 0); + : (right.safeGet() >= 0); /// Size of type is the same. if (size_of_from == size_of_to) @@ -2675,7 +2676,7 @@ struct ToNumberMonotonicity return {}; /// Function cannot be monotonic when left and right are not on the same ranges. - if (divideByRangeOfType(left.get()) != divideByRangeOfType(right.get())) + if (divideByRangeOfType(left.safeGet()) != divideByRangeOfType(right.safeGet())) return {}; if (to_is_unsigned) @@ -2683,7 +2684,7 @@ struct ToNumberMonotonicity else { // If To is signed, it's possible that the signedness is different after conversion. So we check it explicitly. - const bool is_monotonic = (T(left.get()) >= 0) == (T(right.get()) >= 0); + const bool is_monotonic = (T(left.safeGet()) >= 0) == (T(right.safeGet()) >= 0); return { .is_monotonic = is_monotonic }; } @@ -2707,13 +2708,13 @@ struct ToDateMonotonicity } else if ( ((left.getType() == Field::Types::UInt64 || left.isNull()) && (right.getType() == Field::Types::UInt64 || right.isNull()) - && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) + && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF))) || ((left.getType() == Field::Types::Int64 || left.isNull()) && (right.getType() == Field::Types::Int64 || right.isNull()) - && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF))) + && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF))) || (( (left.getType() == Field::Types::Float64 || left.isNull()) && (right.getType() == Field::Types::Float64 || right.isNull()) - && ((left.isNull() || left.get() < 0xFFFF) && (right.isNull() || right.get() >= 0xFFFF)))) + && ((left.isNull() || left.safeGet() < 0xFFFF) && (right.isNull() || right.safeGet() >= 0xFFFF)))) || !isNativeNumber(type)) { return {}; @@ -2768,16 +2769,16 @@ struct ToStringMonotonicity if (left.getType() == Field::Types::UInt64 && right.getType() == Field::Types::UInt64) { - return (left.get() == 0 && right.get() == 0) - || (floor(log10(left.get())) == floor(log10(right.get()))) + return (left.safeGet() == 0 && right.safeGet() == 0) + || (floor(log10(left.safeGet())) == floor(log10(right.safeGet()))) ? positive : not_monotonic; } if (left.getType() == Field::Types::Int64 && right.getType() == Field::Types::Int64) { - return (left.get() == 0 && right.get() == 0) - || (left.get() > 0 && right.get() > 0 && floor(log10(left.get())) == floor(log10(right.get()))) + return (left.safeGet() == 0 && right.safeGet() == 0) + || (left.safeGet() > 0 && right.safeGet() > 0 && floor(log10(left.safeGet())) == floor(log10(right.safeGet()))) ? 
positive : not_monotonic; } @@ -4287,13 +4288,98 @@ private: WrapperType createDynamicToColumnWrapper(const DataTypePtr &) const { return [this] - (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable *, size_t input_rows_count) -> ColumnPtr { + /// When casting Dynamic to regular column we should cast all variants from current Dynamic column + /// and construct the result based on discriminators. const auto & column_dynamic = assert_cast(*arguments.front().column.get()); + const auto & variant_column = column_dynamic.getVariantColumn(); const auto & variant_info = column_dynamic.getVariantInfo(); - auto variant_wrapper = createVariantToColumnWrapper(assert_cast(*variant_info.variant_type), result_type); - ColumnsWithTypeAndName args = {ColumnWithTypeAndName(column_dynamic.getVariantColumnPtr(), variant_info.variant_type, "")}; - return variant_wrapper(args, result_type, col_nullable, input_rows_count); + + /// First, cast usual variants to result type. + const auto & variant_types = assert_cast(*variant_info.variant_type).getVariants(); + std::vector casted_variant_columns; + casted_variant_columns.reserve(variant_types.size()); + for (size_t i = 0; i != variant_types.size(); ++i) + { + const auto & variant_col = variant_column.getVariantPtrByGlobalDiscriminator(i); + ColumnsWithTypeAndName variant = {{variant_col, variant_types[i], ""}}; + auto variant_wrapper = prepareUnpackDictionaries(variant_types[i], result_type); + casted_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_col->size())); + } + + /// Second, collect all variants stored in shared variant and cast them to result type. + std::vector variant_columns_from_shared_variant; + DataTypes variant_types_from_shared_variant; + /// We will need to know what variant to use when we see discriminator of a shared variant. + /// To do it, we remember what variant was extracted from each row and what was it's offset. + PaddedPODArray shared_variant_indexes; + PaddedPODArray shared_variant_offsets; + std::unordered_map shared_variant_to_index; + const auto & shared_variant = column_dynamic.getSharedVariant(); + const auto shared_variant_discr = column_dynamic.getSharedVariantDiscriminator(); + const auto & local_discriminators = variant_column.getLocalDiscriminators(); + const auto & offsets = variant_column.getOffsets(); + if (!shared_variant.empty()) + { + shared_variant_indexes.reserve(input_rows_count); + shared_variant_offsets.reserve(input_rows_count); + FormatSettings format_settings; + const auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(shared_variant_discr); + for (size_t i = 0; i != input_rows_count; ++i) + { + if (local_discriminators[i] == shared_variant_local_discr) + { + auto value = shared_variant.getDataAt(offsets[i]); + ReadBufferFromMemory buf(value.data, value.size); + auto type = decodeDataType(buf); + auto type_name = type->getName(); + auto it = shared_variant_to_index.find(type_name); + /// Check if we didn't create column for this variant yet. 
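/// --- Illustrative sketch, not part of this patch -----------------------------------------------
/// Each shared-variant cell read in this loop is a raw blob: a binary-encoded data type header
/// followed by the value serialized with that type's default binary serialization. Decoding one
/// cell in isolation looks roughly like this (names follow the calls used in this hunk; error
/// handling omitted, `row` is a hypothetical row index):
///
///     StringRef value = shared_variant.getDataAt(offsets[row]);
///     ReadBufferFromMemory buf(value.data, value.size);
///     DataTypePtr type = decodeDataType(buf);                     /// read the type header
///     MutableColumnPtr tmp = type->createColumn();
///     type->getDefaultSerialization()->deserializeBinary(*tmp, buf, FormatSettings{});
///     /// tmp now holds the single decoded value with its original type.
/// ------------------------------------------------------------------------------------------------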
+ if (it == shared_variant_to_index.end()) + { + it = shared_variant_to_index.emplace(type_name, variant_columns_from_shared_variant.size()).first; + variant_columns_from_shared_variant.push_back(type->createColumn()); + variant_types_from_shared_variant.push_back(type); + } + + shared_variant_indexes.push_back(it->second); + shared_variant_offsets.push_back(variant_columns_from_shared_variant[it->second]->size()); + type->getDefaultSerialization()->deserializeBinary(*variant_columns_from_shared_variant[it->second], buf, format_settings); + } + else + { + shared_variant_indexes.emplace_back(); + shared_variant_offsets.emplace_back(); + } + } + } + + /// Cast all extracted variants into result type. + std::vector casted_shared_variant_columns; + casted_shared_variant_columns.reserve(variant_types_from_shared_variant.size()); + for (size_t i = 0; i != variant_types_from_shared_variant.size(); ++i) + { + ColumnsWithTypeAndName variant = {{variant_columns_from_shared_variant[i]->getPtr(), variant_types_from_shared_variant[i], ""}}; + auto variant_wrapper = prepareUnpackDictionaries(variant_types_from_shared_variant[i], result_type); + casted_shared_variant_columns.push_back(variant_wrapper(variant, result_type, nullptr, variant_columns_from_shared_variant[i]->size())); + } + + /// Construct result column from all casted variants. + auto res = result_type->createColumn(); + res->reserve(input_rows_count); + for (size_t i = 0; i != input_rows_count; ++i) + { + auto global_discr = variant_column.globalDiscriminatorByLocal(local_discriminators[i]); + if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) + res->insertDefault(); + else if (global_discr == shared_variant_discr) + res->insertFrom(*casted_shared_variant_columns[shared_variant_indexes[i]], shared_variant_offsets[i]); + else + res->insertFrom(*casted_variant_columns[global_discr], offsets[i]); + } + + return res; }; } @@ -4320,200 +4406,51 @@ private: }; } - std::pair getReducedVariant( - const ColumnVariant & variant_column, - const DataTypePtr & variant_type, - const std::unordered_map & variant_name_to_discriminator, - size_t max_result_num_variants, - const ColumnDynamic::Statistics & statistics = {}) const + WrapperType createVariantToDynamicWrapper(const DataTypeVariant & from_variant_type, const DataTypeDynamic & dynamic_type) const { - const auto & variant_types = assert_cast(*variant_type).getVariants(); - /// First check if we don't exceed the limit in current Variant column. - if (variant_types.size() < max_result_num_variants || (variant_types.size() == max_result_num_variants && variant_name_to_discriminator.contains("String"))) - return {variant_column.getPtr(), variant_type}; - - /// We want to keep the most frequent variants and convert to string the rarest. - std::vector> variant_sizes; - variant_sizes.reserve(variant_types.size()); - std::optional old_string_discriminator; - /// List of variants that should be converted to a single String variant. - std::vector variants_to_convert_to_string; - for (size_t i = 0; i != variant_types.size(); ++i) + /// First create extended Variant with shared variant type and cast this Variant to it. 
+ auto variants_for_dynamic = from_variant_type.getVariants(); + size_t number_of_variants = variants_for_dynamic.size(); + variants_for_dynamic.push_back(ColumnDynamic::getSharedVariantDataType()); + const auto & variant_type_for_dynamic = std::make_shared(variants_for_dynamic); + auto old_to_new_variant_wrapper = createVariantToVariantWrapper(from_variant_type, *variant_type_for_dynamic); + auto max_dynamic_types = dynamic_type.getMaxDynamicTypes(); + return [old_to_new_variant_wrapper, variant_type_for_dynamic, number_of_variants, max_dynamic_types] + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr { - /// String variant won't be removed. - String variant_name = variant_types[i]->getName(); + auto variant_column_for_dynamic = old_to_new_variant_wrapper(arguments, result_type, col_nullable, input_rows_count); + /// If resulting Dynamic column can contain all variants from this Variant column, just create Dynamic column from it. + if (max_dynamic_types >= number_of_variants) + return ColumnDynamic::create(variant_column_for_dynamic, variant_type_for_dynamic, max_dynamic_types, max_dynamic_types); - if (variant_name == "String") - { - old_string_discriminator = i; - /// For simplicity, add this variant to the list that will be converted to string, - /// so we will process it with other variants when constructing the new String variant. - variants_to_convert_to_string.push_back(i); - } - else - { - size_t size = 0; - if (statistics.data.empty()) - size = variant_column.getVariantByGlobalDiscriminator(i).size(); - else - size = statistics.data.at(variant_name); - variant_sizes.emplace_back(size, i); - } - } - - /// Sort variants by sizes, so we will keep the most frequent. - std::sort(variant_sizes.begin(), variant_sizes.end(), std::greater()); - - DataTypes remaining_variants; - remaining_variants.reserve(max_result_num_variants); - /// Add String variant in advance. - remaining_variants.push_back(std::make_shared()); - for (auto [_, discr] : variant_sizes) - { - if (remaining_variants.size() != max_result_num_variants) - remaining_variants.push_back(variant_types[discr]); - else - variants_to_convert_to_string.push_back(discr); - } - - auto reduced_variant = std::make_shared(remaining_variants); - const auto & new_variants = reduced_variant->getVariants(); - /// To construct reduced variant column we will need mapping from old to new discriminators. - std::vector old_to_new_discriminators_mapping; - old_to_new_discriminators_mapping.resize(variant_types.size()); - ColumnVariant::Discriminator string_variant_discriminator = 0; - for (size_t i = 0; i != new_variants.size(); ++i) - { - String variant_name = new_variants[i]->getName(); - if (variant_name == "String") - { - string_variant_discriminator = i; - for (auto discr : variants_to_convert_to_string) - old_to_new_discriminators_mapping[discr] = i; - } - else - { - auto old_discr = variant_name_to_discriminator.at(variant_name); - old_to_new_discriminators_mapping[old_discr] = i; - } - } - - /// Convert all reduced variants to String. 
- std::unordered_map variants_converted_to_string; - variants_converted_to_string.reserve(variants_to_convert_to_string.size()); - size_t string_variant_size = 0; - for (auto discr : variants_to_convert_to_string) - { - auto string_type = std::make_shared(); - auto string_wrapper = prepareUnpackDictionaries(variant_types[discr], string_type); - auto column_to_convert = ColumnWithTypeAndName(variant_column.getVariantPtrByGlobalDiscriminator(discr), variant_types[discr], ""); - ColumnsWithTypeAndName args = {column_to_convert}; - auto variant_string_column = string_wrapper(args, string_type, nullptr, column_to_convert.column->size()); - string_variant_size += variant_string_column->size(); - variants_converted_to_string[discr] = variant_string_column; - } - - /// Create new discriminators and offsets and fill new String variant according to old discriminators. - auto string_variant = ColumnString::create(); - string_variant->reserve(string_variant_size); - auto new_discriminators_column = variant_column.getLocalDiscriminatorsPtr()->cloneEmpty(); - auto & new_discriminators_data = assert_cast(*new_discriminators_column).getData(); - new_discriminators_data.reserve(variant_column.size()); - auto new_offsets = variant_column.getOffsetsPtr()->cloneEmpty(); - auto & new_offsets_data = assert_cast(*new_offsets).getData(); - new_offsets_data.reserve(variant_column.size()); - const auto & old_local_discriminators = variant_column.getLocalDiscriminators(); - const auto & old_offsets = variant_column.getOffsets(); - for (size_t i = 0; i != old_local_discriminators.size(); ++i) - { - auto old_discr = variant_column.globalDiscriminatorByLocal(old_local_discriminators[i]); - - if (old_discr == ColumnVariant::NULL_DISCRIMINATOR) - { - new_discriminators_data.push_back(ColumnVariant::NULL_DISCRIMINATOR); - new_offsets_data.push_back(0); - continue; - } - - auto new_discr = old_to_new_discriminators_mapping[old_discr]; - new_discriminators_data.push_back(new_discr); - if (new_discr != string_variant_discriminator) - { - new_offsets_data.push_back(old_offsets[i]); - } - else - { - new_offsets_data.push_back(string_variant->size()); - string_variant->insertFrom(*variants_converted_to_string[old_discr], old_offsets[i]); - } - } - - /// Create new list of variant columns. 
- Columns new_variant_columns; - new_variant_columns.resize(new_variants.size()); - for (size_t i = 0; i != variant_types.size(); ++i) - { - auto new_discr = old_to_new_discriminators_mapping[i]; - if (new_discr != string_variant_discriminator) - new_variant_columns[new_discr] = variant_column.getVariantPtrByGlobalDiscriminator(i); - } - new_variant_columns[string_variant_discriminator] = std::move(string_variant); - return {ColumnVariant::create(std::move(new_discriminators_column), std::move(new_offsets), new_variant_columns), reduced_variant}; - } - - WrapperType createVariantToDynamicWrapper(const DataTypePtr & from_type, const DataTypeDynamic & dynamic_type) const - { - const auto & from_variant_type = assert_cast(*from_type); - size_t max_dynamic_types = dynamic_type.getMaxDynamicTypes(); - const auto & variants = from_variant_type.getVariants(); - std::unordered_map variant_name_to_discriminator; - variant_name_to_discriminator.reserve(variants.size()); - for (size_t i = 0; i != variants.size(); ++i) - variant_name_to_discriminator[variants[i]->getName()] = i; - - return [from_type, max_dynamic_types, variant_name_to_discriminator, this] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr - { - const auto & variant_column = assert_cast(*arguments.front().column); - auto [reduced_variant_column, reduced_variant_type] = getReducedVariant(variant_column, from_type, variant_name_to_discriminator, max_dynamic_types); - return ColumnDynamic::create(reduced_variant_column, reduced_variant_type, max_dynamic_types); + /// Otherwise some variants should go to the shared variant. Create temporary Dynamic column from this Variant and insert + /// all data to the resulting Dynamic column, this insertion will do all the logic with shared variant. 
+ auto tmp_dynamic_column = ColumnDynamic::create(variant_column_for_dynamic, variant_type_for_dynamic, number_of_variants, number_of_variants); + auto result_dynamic_column = ColumnDynamic::create(max_dynamic_types); + result_dynamic_column->insertRangeFrom(*tmp_dynamic_column, 0, tmp_dynamic_column->size()); + return result_dynamic_column; }; } WrapperType createColumnToDynamicWrapper(const DataTypePtr & from_type, const DataTypeDynamic & dynamic_type) const { if (const auto * variant_type = typeid_cast(from_type.get())) - return createVariantToDynamicWrapper(from_type, dynamic_type); - - if (dynamic_type.getMaxDynamicTypes() == 1) - { - DataTypePtr string_type = std::make_shared(); - if (from_type->isNullable()) - string_type = makeNullable(string_type); - auto string_wrapper = prepareUnpackDictionaries(from_type, string_type); - auto variant_type = std::make_shared(DataTypes{removeNullable(string_type)}); - auto variant_wrapper = createColumnToVariantWrapper(string_type, *variant_type); - return [string_wrapper, variant_wrapper, string_type, variant_type, max_dynamic_types=dynamic_type.getMaxDynamicTypes()] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr - { - auto string_column = string_wrapper(arguments, string_type, col_nullable, input_rows_count); - auto column = ColumnWithTypeAndName(string_column, string_type, ""); - ColumnsWithTypeAndName args = {column}; - auto variant_column = variant_wrapper(args, variant_type, nullptr, string_column->size()); - return ColumnDynamic::create(variant_column, variant_type, max_dynamic_types); - }; - } + return createVariantToDynamicWrapper(*variant_type, dynamic_type); if (context && context->getSettingsRef().cast_string_to_dynamic_use_inference && isStringOrFixedString(removeNullable(removeLowCardinality(from_type)))) return createStringToDynamicThroughParsingWrapper(); + /// First, cast column to Variant with 2 variants - the type of the column we cast and shared variant type. auto variant_type = std::make_shared(DataTypes{removeNullableOrLowCardinalityNullable(from_type)}); - auto variant_wrapper = createColumnToVariantWrapper(from_type, *variant_type); - return [variant_wrapper, variant_type, max_dynamic_types=dynamic_type.getMaxDynamicTypes()] - (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr + auto column_to_variant_wrapper = createColumnToVariantWrapper(from_type, *variant_type); + /// Second, cast this Variant to Dynamic. 
+ auto variant_to_dynamic_wrapper = createVariantToDynamicWrapper(*variant_type, dynamic_type); + return [column_to_variant_wrapper, variant_to_dynamic_wrapper, variant_type] + (ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, const ColumnNullable * col_nullable, size_t input_rows_count) -> ColumnPtr { - auto variant_res = variant_wrapper(arguments, variant_type, col_nullable, input_rows_count); - return ColumnDynamic::create(variant_res, variant_type, max_dynamic_types); + auto variant_res = column_to_variant_wrapper(arguments, variant_type, col_nullable, input_rows_count); + ColumnsWithTypeAndName args = {{variant_res, variant_type, ""}}; + return variant_to_dynamic_wrapper(args, result_type, nullptr, input_rows_count); }; } @@ -4530,21 +4467,26 @@ private: (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr { const auto & column_dynamic = assert_cast(*arguments[0].column); - return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), to_max_types); + /// We should use the same limit as already used in column and change only global limit. + /// It's needed because shared variant should contain values only when limit is exceeded, + /// so if there are already some data, we cannot increase the limit. + return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), column_dynamic.getMaxDynamicTypes(), to_max_types); }; } - return [to_max_types, this] + return [to_max_types] (ColumnsWithTypeAndName & arguments, const DataTypePtr &, const ColumnNullable *, size_t) -> ColumnPtr { const auto & column_dynamic = assert_cast(*arguments[0].column); - auto [reduced_variant_column, reduced_variant_type] = getReducedVariant( - column_dynamic.getVariantColumn(), - column_dynamic.getVariantInfo().variant_type, - column_dynamic.getVariantInfo().variant_name_to_discriminator, - to_max_types, - column_dynamic.getStatistics()); - return ColumnDynamic::create(reduced_variant_column, reduced_variant_type, to_max_types); + /// If real limit in the column is not greater than desired, just use the same variant column. + if (column_dynamic.getMaxDynamicTypes() <= to_max_types) + return ColumnDynamic::create(column_dynamic.getVariantColumnPtr(), column_dynamic.getVariantInfo(), column_dynamic.getMaxDynamicTypes(), to_max_types); + + /// Otherwise some variants should go to the shared variant. In this case we can just insert all + /// the data into resulting column and it will do all the logic with shared variant. 
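/// --- Illustrative sketch, not part of this patch -----------------------------------------------
/// The limit-change rule implemented by this wrapper, written out as a plain function; the calls
/// mirror the ones used above and below:
///
///     ColumnPtr changeMaxDynamicTypes(const ColumnDynamic & src, size_t to_max_types)
///     {
///         /// Raising the limit: keep the variant data and the current internal limit, change only
///         /// the global limit, so the shared variant keeps holding only values that overflowed
///         /// the old, smaller limit.
///         if (src.getMaxDynamicTypes() <= to_max_types)
///             return ColumnDynamic::create(src.getVariantColumnPtr(), src.getVariantInfo(),
///                                          src.getMaxDynamicTypes(), to_max_types);
///
///         /// Lowering the limit: rebuild by insertion so that excess variants are re-encoded
///         /// into the shared variant of the smaller column.
///         auto res = ColumnDynamic::create(to_max_types);
///         res->insertRangeFrom(src, 0, src.size());
///         return res;
///     }
/// ------------------------------------------------------------------------------------------------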
+ auto result_dynamic_column = ColumnDynamic::create(to_max_types); + result_dynamic_column->insertRangeFrom(column_dynamic, 0, column_dynamic.size()); + return result_dynamic_column; }; } @@ -4673,7 +4615,7 @@ private: return [function_name] ( ColumnsWithTypeAndName & arguments, const DataTypePtr & res_type, const ColumnNullable * nullable_col, size_t /*input_rows_count*/) { - using ColumnEnumType = EnumType::ColumnType; + using ColumnEnumType = typename EnumType::ColumnType; const auto & first_col = arguments.front().column.get(); const auto & first_type = arguments.front().type.get(); diff --git a/src/Functions/FunctionsJSON.cpp b/src/Functions/FunctionsJSON.cpp index c35df8ba72d..1eaf0d1609a 100644 --- a/src/Functions/FunctionsJSON.cpp +++ b/src/Functions/FunctionsJSON.cpp @@ -243,7 +243,7 @@ private: } case MoveType::Index: { - Int64 index = (*arguments[j + 1].column)[row].get(); + Int64 index = (*arguments[j + 1].column)[row].safeGet(); if (!moveToElementByIndex(res_element, static_cast(index), key)) return false; break; diff --git a/src/Functions/FunctionsLogical.cpp b/src/Functions/FunctionsLogical.cpp index 65d7473b945..ff0cff09c9e 100644 --- a/src/Functions/FunctionsLogical.cpp +++ b/src/Functions/FunctionsLogical.cpp @@ -701,11 +701,11 @@ ColumnPtr FunctionAnyArityLogical::getConstantResultForNonConstArgum bool constant_value_bool = false; if (field_type == Field::Types::Float64) - constant_value_bool = static_cast(constant_field_value.get()); + constant_value_bool = static_cast(constant_field_value.safeGet()); else if (field_type == Field::Types::Int64) - constant_value_bool = static_cast(constant_field_value.get()); + constant_value_bool = static_cast(constant_field_value.safeGet()); else if (field_type == Field::Types::UInt64) - constant_value_bool = static_cast(constant_field_value.get()); + constant_value_bool = static_cast(constant_field_value.safeGet()); has_true_constant = has_true_constant || constant_value_bool; has_false_constant = has_false_constant || !constant_value_bool; diff --git a/src/Functions/FunctionsRound.h b/src/Functions/FunctionsRound.h index 6b65a5feaec..ed7fe1a5de1 100644 --- a/src/Functions/FunctionsRound.h +++ b/src/Functions/FunctionsRound.h @@ -500,7 +500,7 @@ inline Scale getScaleArg(const ColumnConst* scale_col) { const auto & scale_field = scale_col->getField(); - Int64 scale64 = scale_field.get(); + Int64 scale64 = scale_field.safeGet(); validateScale(scale64); return scale64; @@ -632,7 +632,7 @@ public: Scale raw_scale = scale64; DecimalRoundingImpl::applyOne(value_col_typed->getElement(i), value_col_typed->getScale(), - reinterpret_cast::NativeT&>(col_res->getElement(i)), raw_scale); + reinterpret_cast::NativeT&>(col_res->getElement(i)), raw_scale); } } } @@ -854,7 +854,7 @@ private: using ValueType = typename Container::value_type; std::vector boundary_values(boundaries.size()); for (size_t i = 0; i < boundaries.size(); ++i) - boundary_values[i] = static_cast(boundaries[i].get()); + boundary_values[i] = static_cast(boundaries[i].safeGet()); ::sort(boundary_values.begin(), boundary_values.end()); boundary_values.erase(std::unique(boundary_values.begin(), boundary_values.end()), boundary_values.end()); diff --git a/src/Functions/FunctionsTimeWindow.cpp b/src/Functions/FunctionsTimeWindow.cpp index 77d740803be..88b85c48326 100644 --- a/src/Functions/FunctionsTimeWindow.cpp +++ b/src/Functions/FunctionsTimeWindow.cpp @@ -267,7 +267,12 @@ struct TimeWindowImpl { auto type = WhichDataType(arguments[0].type); if (type.isTuple()) - return 
std::static_pointer_cast(arguments[0].type)->getElement(0); + { + const auto & tuple_elems = std::static_pointer_cast(arguments[0].type)->getElements(); + if (tuple_elems.empty()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Tuple passed to {} should not be empty", function_name); + return tuple_elems[0]; + } else if (type.isUInt32()) return std::make_shared(); else @@ -622,7 +627,12 @@ struct TimeWindowImpl { auto type = WhichDataType(arguments[0].type); if (type.isTuple()) - return std::static_pointer_cast(arguments[0].type)->getElement(0); + { + const auto & tuple_elems = std::static_pointer_cast(arguments[0].type)->getElements(); + if (tuple_elems.empty()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Tuple passed to {} should not be empty", function_name); + return tuple_elems[0]; + } else if (type.isUInt32()) return std::make_shared(); else diff --git a/src/Functions/IFunctionCustomWeek.h b/src/Functions/IFunctionCustomWeek.h index 51542c9cab1..ba0baa35819 100644 --- a/src/Functions/IFunctionCustomWeek.h +++ b/src/Functions/IFunctionCustomWeek.h @@ -50,15 +50,15 @@ public: if (checkAndGetDataType(&type)) { - return Transform::FactorTransform::execute(UInt16(left.get()), date_lut) - == Transform::FactorTransform::execute(UInt16(right.get()), date_lut) + return Transform::FactorTransform::execute(UInt16(left.safeGet()), date_lut) + == Transform::FactorTransform::execute(UInt16(right.safeGet()), date_lut) ? is_monotonic : is_not_monotonic; } else { - return Transform::FactorTransform::execute(UInt32(left.get()), date_lut) - == Transform::FactorTransform::execute(UInt32(right.get()), date_lut) + return Transform::FactorTransform::execute(UInt32(left.safeGet()), date_lut) + == Transform::FactorTransform::execute(UInt32(right.safeGet()), date_lut) ? is_monotonic : is_not_monotonic; } diff --git a/src/Functions/IFunctionDateOrDateTime.h b/src/Functions/IFunctionDateOrDateTime.h index 762b79bfafc..899aa2c305d 100644 --- a/src/Functions/IFunctionDateOrDateTime.h +++ b/src/Functions/IFunctionDateOrDateTime.h @@ -72,30 +72,30 @@ public: if (checkAndGetDataType(type_ptr)) { - return Transform::FactorTransform::execute(UInt16(left.get()), *date_lut) - == Transform::FactorTransform::execute(UInt16(right.get()), *date_lut) + return Transform::FactorTransform::execute(UInt16(left.safeGet()), *date_lut) + == Transform::FactorTransform::execute(UInt16(right.safeGet()), *date_lut) ? is_monotonic : is_not_monotonic; } else if (checkAndGetDataType(type_ptr)) { - return Transform::FactorTransform::execute(Int32(left.get()), *date_lut) - == Transform::FactorTransform::execute(Int32(right.get()), *date_lut) + return Transform::FactorTransform::execute(Int32(left.safeGet()), *date_lut) + == Transform::FactorTransform::execute(Int32(right.safeGet()), *date_lut) ? is_monotonic : is_not_monotonic; } else if (checkAndGetDataType(type_ptr)) { - return Transform::FactorTransform::execute(UInt32(left.get()), *date_lut) - == Transform::FactorTransform::execute(UInt32(right.get()), *date_lut) + return Transform::FactorTransform::execute(UInt32(left.safeGet()), *date_lut) + == Transform::FactorTransform::execute(UInt32(right.safeGet()), *date_lut) ? 
is_monotonic : is_not_monotonic; } else { assert(checkAndGetDataType(type_ptr)); - const auto & left_date_time = left.get(); + const auto & left_date_time = left.safeGet(); TransformDateTime64 transformer_left(left_date_time.getScale()); - const auto & right_date_time = right.get(); + const auto & right_date_time = right.safeGet(); TransformDateTime64 transformer_right(right_date_time.getScale()); return transformer_left.execute(left_date_time.getValue(), *date_lut) diff --git a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp index fb74018b330..84ac0ff08f3 100644 --- a/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp +++ b/src/Functions/JSONPath/Parsers/ParserJSONPathRange.cpp @@ -46,7 +46,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { return false; } - range_indices.first = static_cast(number_ptr->as()->value.get()); + range_indices.first = static_cast(number_ptr->as()->value.safeGet()); if (pos->type == TokenType::Comma || pos->type == TokenType::ClosingSquareBracket) { @@ -63,7 +63,7 @@ bool ParserJSONPathRange::parseImpl(Pos & pos, ASTPtr & node, Expected & expecte { return false; } - range_indices.second = static_cast(number_ptr->as()->value.get()); + range_indices.second = static_cast(number_ptr->as()->value.safeGet()); } else { diff --git a/src/Functions/MultiMatchAllIndicesImpl.h b/src/Functions/MultiMatchAllIndicesImpl.h index 3aeac808880..e7c3aebf794 100644 --- a/src/Functions/MultiMatchAllIndicesImpl.h +++ b/src/Functions/MultiMatchAllIndicesImpl.h @@ -77,7 +77,7 @@ struct MultiMatchAllIndicesImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); checkHyperscanRegexp(needles, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); diff --git a/src/Functions/MultiMatchAnyImpl.h b/src/Functions/MultiMatchAnyImpl.h index fda752cbacc..54413cbc1cd 100644 --- a/src/Functions/MultiMatchAnyImpl.h +++ b/src/Functions/MultiMatchAnyImpl.h @@ -91,7 +91,7 @@ struct MultiMatchAnyImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); checkHyperscanRegexp(needles, max_hyperscan_regexp_length, max_hyperscan_regexp_total_length); diff --git a/src/Functions/MultiSearchAllPositionsImpl.h b/src/Functions/MultiSearchAllPositionsImpl.h index cfe60e51bcd..6c2cd215638 100644 --- a/src/Functions/MultiSearchAllPositionsImpl.h +++ b/src/Functions/MultiSearchAllPositionsImpl.h @@ -33,7 +33,7 @@ struct MultiSearchAllPositionsImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 { diff --git a/src/Functions/MultiSearchFirstIndexImpl.h b/src/Functions/MultiSearchFirstIndexImpl.h index 5b34dbfe601..f1dc9ab9e11 100644 --- a/src/Functions/MultiSearchFirstIndexImpl.h +++ b/src/Functions/MultiSearchFirstIndexImpl.h @@ -45,7 +45,7 @@ struct MultiSearchFirstIndexImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto searcher = Impl::createMultiSearcherInBigHaystack(needles); diff --git 
a/src/Functions/MultiSearchFirstPositionImpl.h b/src/Functions/MultiSearchFirstPositionImpl.h index 06bf7aa94d8..4380eeb1b29 100644 --- a/src/Functions/MultiSearchFirstPositionImpl.h +++ b/src/Functions/MultiSearchFirstPositionImpl.h @@ -45,7 +45,7 @@ struct MultiSearchFirstPositionImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto res_callback = [](const UInt8 * start, const UInt8 * end) -> UInt64 { diff --git a/src/Functions/MultiSearchImpl.h b/src/Functions/MultiSearchImpl.h index 909425f5a93..5c652ddcb74 100644 --- a/src/Functions/MultiSearchImpl.h +++ b/src/Functions/MultiSearchImpl.h @@ -45,7 +45,7 @@ struct MultiSearchImpl std::vector needles; needles.reserve(needles_arr.size()); for (const auto & needle : needles_arr) - needles.emplace_back(needle.get()); + needles.emplace_back(needle.safeGet()); auto searcher = Impl::createMultiSearcherInBigHaystack(needles); diff --git a/src/Functions/URL/cutURLParameter.cpp b/src/Functions/URL/cutURLParameter.cpp index 3ab9cad1ea7..4439e79e962 100644 --- a/src/Functions/URL/cutURLParameter.cpp +++ b/src/Functions/URL/cutURLParameter.cpp @@ -156,7 +156,7 @@ public: for (size_t j = 0; j < num_needles; ++j) { auto field = col_needle_const_array->getData()[j]; - cutURL(res_data, field.get(), res_offset, cur_res_offset); + cutURL(res_data, field.safeGet(), res_offset, cur_res_offset); } } else diff --git a/src/Functions/array/arrayElement.cpp b/src/Functions/array/arrayElement.cpp index 227b29d5d9f..81f3f97979b 100644 --- a/src/Functions/array/arrayElement.cpp +++ b/src/Functions/array/arrayElement.cpp @@ -904,10 +904,10 @@ ColumnPtr FunctionArrayElement::executeNumberConst( return nullptr; if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { ArrayElementNumImpl::template vectorConst( - col_nested->getData(), col_array->getOffsets(), index.get() - 1, col_res_vec->getData(), builder); + col_nested->getData(), col_array->getOffsets(), index.safeGet() - 1, col_res_vec->getData(), builder); } else if (index.getType() == Field::Types::Int64) { @@ -972,14 +972,14 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument auto col_res = ColumnString::create(); if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { if (builder) ArrayElementStringImpl::vectorConst( col_nested->getChars(), col_array->getOffsets(), col_nested->getOffsets(), - index.get() - 1, + index.safeGet() - 1, col_res->getChars(), col_res->getOffsets(), builder); @@ -988,7 +988,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument col_nested->getChars(), col_array->getOffsets(), col_nested->getOffsets(), - index.get() - 1, + index.safeGet() - 1, col_res->getChars(), col_res->getOffsets(), builder); @@ -1000,7 +1000,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument col_nested->getChars(), col_array->getOffsets(), col_nested->getOffsets(), - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), col_res->getChars(), col_res->getOffsets(), builder); @@ -1009,7 +1009,7 @@ FunctionArrayElement::executeStringConst(const ColumnsWithTypeAndName & argument col_nested->getChars(), col_array->getOffsets(), 
col_nested->getOffsets(), - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), col_res->getChars(), col_res->getOffsets(), builder); @@ -1046,7 +1046,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( auto res_offsets = ColumnArray::ColumnOffsets::create(); auto res_string_null_map = col_nullable ? ColumnUInt8::create() : nullptr; if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { if (col_nullable) ArrayElementArrayStringImpl::vectorConst( @@ -1055,7 +1055,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), &string_null_map->getData(), - index.get() - 1, + index.safeGet() - 1, res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1068,7 +1068,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), nullptr, - index.get() - 1, + index.safeGet() - 1, res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1084,7 +1084,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), &string_null_map->getData(), - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1097,7 +1097,7 @@ ColumnPtr FunctionArrayElement::executeArrayStringConst( col_nested_array->getOffsets(), col_nested_elem->getOffsets(), nullptr, - -(UInt64(index.get()) + 1), + -(UInt64(index.safeGet()) + 1), res_string->getChars(), res_offsets->getData(), res_string->getOffsets(), @@ -1153,7 +1153,7 @@ ColumnPtr FunctionArrayElement::executeArrayNumberConst( auto & res_offsets = res_array->getOffsets(); NullMap * res_null_map = res_nullable ? 
&res_nullable->getNullMapData() : nullptr; - if (index.getType() == Field::Types::UInt64 || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + if (index.getType() == Field::Types::UInt64 || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) { if (col_nullable) ArrayElementArrayNumImpl::template vectorConst( @@ -1161,7 +1161,7 @@ ColumnPtr FunctionArrayElement::executeArrayNumberConst( col_array->getOffsets(), col_nested_array->getOffsets(), null_map, - index.get() - 1, + index.safeGet() - 1, res_data->getData(), res_offsets, res_null_map, @@ -1172,7 +1172,7 @@ ColumnPtr FunctionArrayElement::executeArrayNumberConst( col_array->getOffsets(), col_nested_array->getOffsets(), null_map, - index.get() - 1, + index.safeGet() - 1, res_data->getData(), res_offsets, res_null_map, @@ -1392,12 +1392,12 @@ ColumnPtr FunctionArrayElement::executeGenericConst( auto col_res = col_nested.cloneEmpty(); if (index.getType() == Field::Types::UInt64 - || (index.getType() == Field::Types::Int64 && index.get() >= 0)) + || (index.getType() == Field::Types::Int64 && index.safeGet() >= 0)) ArrayElementGenericImpl::vectorConst( - col_nested, col_array->getOffsets(), index.get() - 1, *col_res, builder); + col_nested, col_array->getOffsets(), index.safeGet() - 1, *col_res, builder); else if (index.getType() == Field::Types::Int64) ArrayElementGenericImpl::vectorConst( - col_nested, col_array->getOffsets(), -(static_cast(index.get() + 1)), *col_res, builder); + col_nested, col_array->getOffsets(), -(static_cast(index.safeGet() + 1)), *col_res, builder); else throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal type of array index"); @@ -1789,7 +1789,7 @@ bool FunctionArrayElement::matchKeyToIndexStringConst( using DataColumn = std::decay_t; if (index.getType() != Field::Types::String) return false; - MatcherStringConst matcher{data_column, index.get()}; + MatcherStringConst matcher{data_column, index.safeGet()}; executeMatchKeyToIndex(offsets, matched_idxs, matcher); return true; }); diff --git a/src/Functions/array/mapOp.cpp b/src/Functions/array/mapOp.cpp index 86797cb5db0..614b01c2ac8 100644 --- a/src/Functions/array/mapOp.cpp +++ b/src/Functions/array/mapOp.cpp @@ -237,7 +237,7 @@ private: } arg.val_column->get(offset + j, temp_val); - ValType value = temp_val.get(); + ValType value = temp_val.safeGet(); if constexpr (op_type == OpTypes::ADD) { diff --git a/src/Functions/dynamicType.cpp b/src/Functions/dynamicType.cpp index e8ca73597d6..327cdfe1616 100644 --- a/src/Functions/dynamicType.cpp +++ b/src/Functions/dynamicType.cpp @@ -2,10 +2,14 @@ #include #include #include +#include +#include #include #include #include #include +#include +#include #include @@ -65,11 +69,15 @@ public: const auto & variant_column = dynamic_column->getVariantColumn(); auto res = result_type->createColumn(); String element_type; + auto shared_variant_discr = dynamic_column->getSharedVariantDiscriminator(); + const auto & shared_variant = dynamic_column->getSharedVariant(); for (size_t i = 0; i != input_rows_count; ++i) { auto global_discr = variant_column.globalDiscriminatorAt(i); if (global_discr == ColumnVariant::NULL_DISCRIMINATOR) element_type = name_for_null; + else if (global_discr == shared_variant_discr) + element_type = getTypeNameFromSharedVariantValue(shared_variant.getDataAt(variant_column.offsetAt(i))); else element_type = variant_info.variant_names[global_discr]; @@ -78,6 +86,63 @@ public: return res; } + + String getTypeNameFromSharedVariantValue(StringRef value) const + { + ReadBufferFromMemory 
buf(value.data, value.size); + return decodeDataType(buf)->getName(); + } +}; + +class FunctionIsDynamicElementInSharedData : public IFunction +{ +public: + static constexpr auto name = "isDynamicElementInSharedData"; + + static FunctionPtr create(ContextPtr) { return std::make_shared(); } + String getName() const override { return name; } + size_t getNumberOfArguments() const override { return 1; } + bool useDefaultImplementationForConstants() const override { return true; } + bool useDefaultImplementationForNulls() const override { return false; } + bool useDefaultImplementationForLowCardinalityColumns() const override { return false; } + bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return true; } + + DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override + { + if (arguments.empty() || arguments.size() > 1) + throw Exception( + ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, + "Number of arguments for function {} doesn't match: passed {}, should be 1", + getName(), arguments.empty()); + + if (!isDynamic(arguments[0].type.get())) + throw Exception( + ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be Dynamic, got {} instead", + getName(), arguments[0].type->getName()); + + return DataTypeFactory::instance().get("Bool"); + } + + ColumnPtr executeImpl(const ColumnsWithTypeAndName & arguments, const DataTypePtr & result_type, size_t input_rows_count) const override + { + const ColumnDynamic * dynamic_column = checkAndGetColumn(arguments[0].column.get()); + if (!dynamic_column) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, + "First argument for function {} must be Dynamic, got {} instead", + getName(), arguments[0].type->getName()); + + const auto & variant_column = dynamic_column->getVariantColumn(); + const auto & local_discriminators = variant_column.getLocalDiscriminators(); + auto res = result_type->createColumn(); + auto & res_data = assert_cast(*res).getData(); + res_data.reserve(dynamic_column->size()); + auto shared_variant_local_discr = variant_column.localDiscriminatorByGlobal(dynamic_column->getSharedVariantDiscriminator()); + for (size_t i = 0; i != input_rows_count; ++i) + res_data.push_back(local_discriminators[i] == shared_variant_local_discr); + + return res; + } }; } @@ -88,7 +153,7 @@ REGISTER_FUNCTION(DynamicType) .description = R"( Returns the variant type name for each row of `Dynamic` column. If row contains NULL, it returns 'None' for it. )", - .syntax = {"dynamicType(variant)"}, + .syntax = {"dynamicType(dynamic)"}, .arguments = {{"dynamic", "Dynamic column"}}, .examples = {{{ "Example", @@ -104,6 +169,30 @@ SELECT d, dynamicType(d) FROM test; │ Hello, World! │ String │ │ [1,2,3] │ Array(Int64) │ └───────────────┴────────────────┘ +)"}}}, + .categories{"Variant"}, + }); + + factory.registerFunction(FunctionDocumentation{ + .description = R"( +Returns true for rows in Dynamic column that are not separated into subcolumns and stored inside shared variant in binary form. +)", + .syntax = {"isDynamicElementInSharedData(dynamic)"}, + .arguments = {{"dynamic", "Dynamic column"}}, + .examples = {{{ + "Example", + R"( +CREATE TABLE test (d Dynamic(max_types=2)) ENGINE = Memory; +INSERT INTO test VALUES (NULL), (42), ('Hello, World!'), ([1, 2, 3]); +SELECT d, isDynamicElementInSharedData(d) FROM test; +)", + R"( +┌─d─────────────┬─isDynamicElementInSharedData(d)─┐ +│ ᴺᵁᴸᴸ │ false │ +│ 42 │ false │ +│ Hello, World! 
│ true │ +│ [1,2,3] │ true │ +└───────────────┴────────────────────┘ )"}}}, .categories{"Variant"}, }); diff --git a/src/Functions/geometryConverters.h b/src/Functions/geometryConverters.h index 03831d37e0c..f1156d81f01 100644 --- a/src/Functions/geometryConverters.h +++ b/src/Functions/geometryConverters.h @@ -31,6 +31,9 @@ namespace ErrorCodes template using LineString = boost::geometry::model::linestring; +template +using MultiLineString = boost::geometry::model::multi_linestring>; + template using Ring = boost::geometry::model::ring; @@ -42,12 +45,14 @@ using MultiPolygon = boost::geometry::model::multi_polygon>; using CartesianPoint = boost::geometry::model::d2::point_xy; using CartesianLineString = LineString; +using CartesianMultiLineString = MultiLineString; using CartesianRing = Ring; using CartesianPolygon = Polygon; using CartesianMultiPolygon = MultiPolygon; using SphericalPoint = boost::geometry::model::point>; using SphericalLineString = LineString; +using SphericalMultiLineString = MultiLineString; using SphericalRing = Ring; using SphericalPolygon = Polygon; using SphericalMultiPolygon = MultiPolygon; @@ -113,6 +118,28 @@ struct ColumnToLineStringsConverter } }; +/** + * Class which converts Column with type Array(Array(Tuple(Float64, Float64))) to a vector of boost multi_linestring type. +*/ +template +struct ColumnToMultiLineStringsConverter +{ + static std::vector> convert(ColumnPtr col) + { + const IColumn::Offsets & offsets = typeid_cast(*col).getOffsets(); + size_t prev_offset = 0; + std::vector> answer(offsets.size()); + auto all_linestrings = ColumnToLineStringsConverter::convert(typeid_cast(*col).getDataPtr()); + for (size_t iter = 0; iter < offsets.size() && iter < all_linestrings.size(); ++iter) + { + for (size_t linestring_iter = prev_offset; linestring_iter < offsets[iter]; ++linestring_iter) + answer[iter].emplace_back(std::move(all_linestrings[linestring_iter])); + prev_offset = offsets[iter]; + } + return answer; + } +}; + /** * Class which converts Column with type Array(Tuple(Float64, Float64)) to a vector of boost ring type. */ @@ -268,6 +295,38 @@ private: ColumnUInt64::MutablePtr offsets; }; +/// Serialize Point, MultiLineString as MultiLineString +template +class MultiLineStringSerializer +{ +public: + MultiLineStringSerializer() + : offsets(ColumnUInt64::create()) + {} + + explicit MultiLineStringSerializer(size_t n) + : offsets(ColumnUInt64::create(n)) + {} + + void add(const MultiLineString & multilinestring) + { + size += multilinestring.size(); + offsets->insertValue(size); + for (const auto & linestring : multilinestring) + linestring_serializer.add(linestring); + } + + ColumnPtr finalize() + { + return ColumnArray::create(linestring_serializer.finalize(), std::move(offsets)); + } + +private: + size_t size = 0; + LineStringSerializer linestring_serializer; + ColumnUInt64::MutablePtr offsets; +}; + /// Almost the same as LineStringSerializer /// Serialize Point, Ring as Ring template @@ -411,6 +470,11 @@ static void callOnGeometryDataType(DataTypePtr type, F && f) else if (factory.get("LineString")->equals(*type) && type->getCustomName() && type->getCustomName()->getName() == "LineString") return f(ConverterType>()); + /// We should take the name into consideration to avoid ambiguity. + /// Because for example both MultiLineString and Polygon are resolved to Array(Array(Point)). 
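// Aside: MultiLineString and Polygon share one physical layout (nested point arrays), so only
// the custom type name distinguishes them, which is why the dispatch here has to consult
// getCustomName(). A standalone sketch with hypothetical stand-in types:
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using Point = std::pair<double, double>;
using NestedPoints = std::vector<std::vector<Point>>;

struct TaggedGeometry
{
    std::string name;     // "MultiLineString" or "Polygon"
    NestedPoints data;    // identical representation for both
};

void describe(const TaggedGeometry & g)
{
    if (g.name == "MultiLineString")
        std::cout << g.data.size() << " independent linestrings\n";
    else if (g.name == "Polygon")
        std::cout << "outer ring plus " << (g.data.empty() ? 0 : g.data.size() - 1) << " hole(s)\n";
}

int main()
{
    describe({"MultiLineString", {{{0, 0}, {1, 1}}, {{2, 2}, {3, 3}}}});
    describe({"Polygon", {{{0, 0}, {4, 0}, {4, 4}, {0, 4}}}});
}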
+ else if (factory.get("MultiLineString")->equals(*type) && type->getCustomName() && type->getCustomName()->getName() == "MultiLineString") + return f(ConverterType>()); + /// For backward compatibility if we call this function not on a custom type, we will consider Array(Tuple(Point)) as type Ring. else if (factory.get("Ring")->equals(*type)) return f(ConverterType>()); diff --git a/src/Functions/getClientHTTPHeader.cpp b/src/Functions/getClientHTTPHeader.cpp index 140f39d03b8..50a6275fc82 100644 --- a/src/Functions/getClientHTTPHeader.cpp +++ b/src/Functions/getClientHTTPHeader.cpp @@ -58,7 +58,7 @@ public: { Field header; source->get(row, header); - if (auto it = client_info.http_headers.find(header.get()); it != client_info.http_headers.end()) + if (auto it = client_info.http_headers.find(header.safeGet()); it != client_info.http_headers.end()) result->insert(it->second); else result->insertDefault(); diff --git a/src/Functions/multiIf.cpp b/src/Functions/multiIf.cpp index c4b675fcf6c..14b8b70b22c 100644 --- a/src/Functions/multiIf.cpp +++ b/src/Functions/multiIf.cpp @@ -200,7 +200,7 @@ public: if (value.isNull()) continue; - if (value.get() == 0) + if (value.safeGet() == 0) continue; instruction.condition_always_true = true; diff --git a/src/Functions/nested.cpp b/src/Functions/nested.cpp index bdaf57d65c9..85c342b5e7c 100644 --- a/src/Functions/nested.cpp +++ b/src/Functions/nested.cpp @@ -145,7 +145,7 @@ private: if (nested_names_field.getType() != Field::Types::Array) return {}; - const auto & nested_names_array = nested_names_field.get(); + const auto & nested_names_array = nested_names_field.safeGet(); Names nested_names; nested_names.reserve(nested_names_array.size()); @@ -155,7 +155,7 @@ private: if (nested_name_field.getType() != Field::Types::String) return {}; - nested_names.push_back(nested_name_field.get()); + nested_names.push_back(nested_name_field.safeGet()); } return nested_names; diff --git a/src/Functions/polygonsIntersection.cpp b/src/Functions/polygonsIntersection.cpp index 329242e762e..43ab03f8c1f 100644 --- a/src/Functions/polygonsIntersection.cpp +++ b/src/Functions/polygonsIntersection.cpp @@ -75,6 +75,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/polygonsSymDifference.cpp b/src/Functions/polygonsSymDifference.cpp index 3c219d0facb..6faec95bb7b 100644 --- a/src/Functions/polygonsSymDifference.cpp +++ b/src/Functions/polygonsSymDifference.cpp @@ -73,6 +73,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function 
{} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/polygonsUnion.cpp b/src/Functions/polygonsUnion.cpp index 969eb2f78fb..5378ff636f8 100644 --- a/src/Functions/polygonsUnion.cpp +++ b/src/Functions/polygonsUnion.cpp @@ -73,6 +73,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/polygonsWithin.cpp b/src/Functions/polygonsWithin.cpp index c63ad5ef868..dacd1c0e18f 100644 --- a/src/Functions/polygonsWithin.cpp +++ b/src/Functions/polygonsWithin.cpp @@ -77,6 +77,8 @@ public: throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be Point", getName()); else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be LineString", getName()); + else if constexpr (std::is_same_v, LeftConverter> || std::is_same_v, RightConverter>) + throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Any argument of function {} must not be MultiLineString", getName()); else { auto first = LeftConverter::convert(arguments[0].column->convertToFullColumnIfConst()); diff --git a/src/Functions/readWkt.cpp b/src/Functions/readWkt.cpp index eb262777b0d..2010b5167e7 100644 --- a/src/Functions/readWkt.cpp +++ b/src/Functions/readWkt.cpp @@ -87,6 +87,11 @@ struct ReadWKTLineStringNameHolder static constexpr const char * name = "readWKTLineString"; }; +struct ReadWKTMultiLineStringNameHolder +{ + static constexpr const char * name = "readWKTMultiLineString"; +}; + struct ReadWKTRingNameHolder { static constexpr const char * name = "readWKTRing"; @@ -131,6 +136,31 @@ Parses a Well-Known Text (WKT) representation of a LineString geometry and retur }, .categories{"Unique identifiers"} }); + factory.registerFunction, ReadWKTMultiLineStringNameHolder>>(FunctionDocumentation + { + .description=R"( +Parses a Well-Known Text (WKT) representation of a MultiLineString geometry and returns it in the internal ClickHouse format. 
+)", + .syntax = "readWKTMultiLineString(wkt_string)", + .arguments{ + {"wkt_string", "The input WKT string representing a MultiLineString geometry."} + }, + .returned_value = "The function returns a ClickHouse internal representation of the multilinestring geometry.", + .examples{ + {"first call", "SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))');", R"( +┌─readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3), (4 4, 5 5, 6 6))')─┐ +│ [[(1,1),(2,2),(3,3)],[(4,4),(5,5),(6,6)]] │ +└──────────────────────────────────────────────────────────────────────────────┘ + + )"}, + {"second call", "SELECT toTypeName(readWKTLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'));", R"( +┌─toTypeName(readWKTLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'))─┐ +│ MultiLineString │ +└─────────────────────────────────────────────────────────────────────────┘ + )"}, + }, + .categories{"Unique identifiers"} + }); factory.registerFunction, ReadWKTRingNameHolder>>(); factory.registerFunction, ReadWKTPolygonNameHolder>>(); factory.registerFunction, ReadWKTMultiPolygonNameHolder>>(); diff --git a/src/Functions/toBool.cpp b/src/Functions/toBool.cpp index 6f2c436c1ea..ac595d313e3 100644 --- a/src/Functions/toBool.cpp +++ b/src/Functions/toBool.cpp @@ -54,8 +54,7 @@ namespace } }; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); - auto func_cast = func_builder_cast->build(cast_args); + auto func_cast = createInternalCast(arguments[0], result_type, CastType::nonAccurate, {}); return func_cast->execute(cast_args, result_type, arguments[0].column->size()); } }; diff --git a/src/IO/HTTPCommon.cpp b/src/IO/HTTPCommon.cpp index 9704d034b2a..fcd96e97b4e 100644 --- a/src/IO/HTTPCommon.cpp +++ b/src/IO/HTTPCommon.cpp @@ -34,14 +34,20 @@ namespace ErrorCodes extern const int RECEIVED_ERROR_TOO_MANY_REQUESTS; } -void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout) +void setResponseDefaultHeaders(HTTPServerResponse & response) { if (!response.getKeepAlive()) return; - Poco::Timespan timeout(keep_alive_timeout, 0); - if (timeout.totalSeconds()) - response.set("Keep-Alive", "timeout=" + std::to_string(timeout.totalSeconds())); + const size_t keep_alive_timeout = response.getSession().getKeepAliveTimeout(); + const size_t keep_alive_max_requests = response.getSession().getMaxKeepAliveRequests(); + if (keep_alive_timeout) + { + if (keep_alive_max_requests) + response.set("Keep-Alive", fmt::format("timeout={}, max={}", keep_alive_timeout, keep_alive_max_requests)); + else + response.set("Keep-Alive", fmt::format("timeout={}", keep_alive_timeout)); + } } HTTPSessionPtr makeHTTPSession( diff --git a/src/IO/HTTPCommon.h b/src/IO/HTTPCommon.h index 3a1fa5bebee..4d0580acaba 100644 --- a/src/IO/HTTPCommon.h +++ b/src/IO/HTTPCommon.h @@ -54,7 +54,7 @@ private: using HTTPSessionPtr = std::shared_ptr; -void setResponseDefaultHeaders(HTTPServerResponse & response, size_t keep_alive_timeout); +void setResponseDefaultHeaders(HTTPServerResponse & response); /// Create session object to perform requests and set required parameters. 
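// Aside: the value produced by the reworked setResponseDefaultHeaders() above is simply
// "timeout=<seconds>" with an optional ", max=<requests>" suffix. A standalone sketch
// (keepAliveHeader is a hypothetical helper, not part of the patch):
#include <iostream>
#include <string>

std::string keepAliveHeader(size_t timeout_seconds, size_t max_requests)
{
    if (timeout_seconds == 0)
        return {};                                           // header is omitted entirely
    std::string value = "timeout=" + std::to_string(timeout_seconds);
    if (max_requests != 0)
        value += ", max=" + std::to_string(max_requests);    // request limit is optional
    return value;
}

int main()
{
    std::cout << keepAliveHeader(10, 0) << '\n';      // timeout=10
    std::cout << keepAliveHeader(30, 1000) << '\n';   // timeout=30, max=1000
}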
HTTPSessionPtr makeHTTPSession( diff --git a/src/IO/NetUtils.h b/src/IO/NetUtils.h new file mode 100644 index 00000000000..12f09524ae7 --- /dev/null +++ b/src/IO/NetUtils.h @@ -0,0 +1,58 @@ +#pragma once + +#include +#include + + +namespace DB +{ + +template +constexpr T netToHost(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T hostToNet(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T toLittleEndian(T value) noexcept +{ + if constexpr (std::endian::native == std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T toBigEndian(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T fromLittleEndian(T value) noexcept +{ + if constexpr (std::endian::native == std::endian::big) + return std::byteswap(value); + return value; +} + +template +constexpr T fromBigEndian(T value) noexcept +{ + if constexpr (std::endian::native != std::endian::big) + return std::byteswap(value); + return value; +} + +} diff --git a/src/IO/ReadBufferFromPocoSocket.cpp b/src/IO/ReadBufferFromPocoSocket.cpp index 26cdee4140c..bbf9f96404f 100644 --- a/src/IO/ReadBufferFromPocoSocket.cpp +++ b/src/IO/ReadBufferFromPocoSocket.cpp @@ -32,7 +32,7 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } -bool ReadBufferFromPocoSocket::nextImpl() +ssize_t ReadBufferFromPocoSocketBase::socketReceiveBytesImpl(char * ptr, size_t size) { ssize_t bytes_read = 0; Stopwatch watch; @@ -43,14 +43,11 @@ bool ReadBufferFromPocoSocket::nextImpl() ProfileEvents::increment(ProfileEvents::NetworkReceiveBytes, bytes_read); }); + CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); + /// Add more details to exceptions. try { - CurrentMetrics::Increment metric_increment(CurrentMetrics::NetworkReceive); - - if (internal_buffer.size() > INT_MAX) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); - /// If async_callback is specified, set socket to non-blocking mode /// and try to read data from it, if socket is not ready for reading, /// run async_callback and try again later. @@ -61,7 +58,7 @@ bool ReadBufferFromPocoSocket::nextImpl() socket.setBlocking(false); SCOPE_EXIT(socket.setBlocking(true)); bool secure = socket.secure(); - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); /// Check EAGAIN and ERR_SSL_WANT_READ/ERR_SSL_WANT_WRITE for secure socket (reading from secure socket can write too). while (bytes_read < 0 && (errno == EAGAIN || (secure && (checkSSLWantRead(bytes_read) || checkSSLWantWrite(bytes_read))))) @@ -73,12 +70,12 @@ bool ReadBufferFromPocoSocket::nextImpl() async_callback(socket.impl()->sockfd(), socket.getReceiveTimeout(), AsyncEventTimeoutType::RECEIVE, socket_description, AsyncTaskExecutor::Event::READ | AsyncTaskExecutor::Event::ERROR); /// Try to read again. 
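// Aside: the loop above retries a non-blocking read whenever it would block (EAGAIN or the
// TLS "want read/write" states), yielding through async_callback in between. A standalone
// sketch of that retry shape; receive and wait_until_readable are hypothetical stand-ins
// for the socket calls.
#include <functional>
#include <iostream>

int readWithRetry(const std::function<int()> & receive, const std::function<void()> & wait_until_readable)
{
    int bytes = receive();
    while (bytes < 0)              // < 0 stands in for EAGAIN / SSL_WANT_READ
    {
        wait_until_readable();     // corresponds to running async_callback
        bytes = receive();         // try to read again
    }
    return bytes;
}

int main()
{
    int attempts = 0;
    auto receive = [&attempts]() { return ++attempts < 3 ? -1 : 128; };
    int bytes = readWithRetry(receive, [] { std::cout << "socket not ready, yielding\n"; });
    std::cout << bytes << " bytes read after " << attempts << " attempt(s)\n";
}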
- bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); } } else { - bytes_read = socket.impl()->receiveBytes(internal_buffer.begin(), static_cast(internal_buffer.size())); + bytes_read = socket.impl()->receiveBytes(ptr, static_cast(size)); } } catch (const Poco::Net::NetException & e) @@ -99,6 +96,16 @@ bool ReadBufferFromPocoSocket::nextImpl() if (bytes_read < 0) throw NetException(ErrorCodes::CANNOT_READ_FROM_SOCKET, "Cannot read from socket (peer: {}, local: {})", peer_address.toString(), socket.address().toString()); + return bytes_read; +} + +bool ReadBufferFromPocoSocketBase::nextImpl() +{ + if (internal_buffer.size() > INT_MAX) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Buffer overflow"); + + ssize_t bytes_read = socketReceiveBytesImpl(internal_buffer.begin(), internal_buffer.size()); + if (read_event != ProfileEvents::end()) ProfileEvents::increment(read_event, bytes_read); @@ -110,7 +117,7 @@ bool ReadBufferFromPocoSocket::nextImpl() return true; } -ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size) +ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size) : BufferWithOwnMemory(buf_size) , socket(socket_) , peer_address(socket.peerAddress()) @@ -119,19 +126,22 @@ ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, { } -ReadBufferFromPocoSocket::ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) - : ReadBufferFromPocoSocket(socket_, buf_size) +ReadBufferFromPocoSocketBase::ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBufferFromPocoSocketBase(socket_, buf_size) { read_event = read_event_; } -bool ReadBufferFromPocoSocket::poll(size_t timeout_microseconds) const +bool ReadBufferFromPocoSocketBase::poll(size_t timeout_microseconds) const { - if (available()) + /// For secure socket it is important to check if any remaining data available in underlying decryption buffer - + /// read always retrieves the whole encrypted frame from the wire and puts it into underlying buffer while returning only requested size - + /// further poll() can block though there is still data to read in the underlying decryption buffer. + if (available() || socket.impl()->available()) return true; Stopwatch watch; - bool res = socket.poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); + bool res = socket.impl()->poll(timeout_microseconds, Poco::Net::Socket::SELECT_READ | Poco::Net::Socket::SELECT_ERROR); ProfileEvents::increment(ProfileEvents::NetworkReceiveElapsedMicroseconds, watch.elapsedMicroseconds()); return res; } diff --git a/src/IO/ReadBufferFromPocoSocket.h b/src/IO/ReadBufferFromPocoSocket.h index 76156612764..912388adaac 100644 --- a/src/IO/ReadBufferFromPocoSocket.h +++ b/src/IO/ReadBufferFromPocoSocket.h @@ -9,7 +9,7 @@ namespace DB { /// Works with the ready Poco::Net::Socket. Blocking operations. 
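// Aside: the poll() change above matters for TLS, where a whole encrypted frame may already
// have been read off the wire and decrypted, so data can be available even when the fd itself
// is not readable. A minimal sketch of that check order; all parameters are hypothetical
// stand-ins for the real buffer and socket state.
#include <iostream>

bool hasDataToRead(size_t local_buffer_bytes, size_t transport_buffered_bytes, bool fd_is_readable)
{
    if (local_buffer_bytes > 0 || transport_buffered_bytes > 0)
        return true;              // data already buffered, no need to block on select()
    return fd_is_readable;        // otherwise fall back to polling the socket
}

int main()
{
    std::cout << hasDataToRead(0, 512, false) << '\n';   // 1: decrypted bytes still buffered
    std::cout << hasDataToRead(0, 0, false) << '\n';     // 0: genuinely nothing to read yet
}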
-class ReadBufferFromPocoSocket : public BufferWithOwnMemory +class ReadBufferFromPocoSocketBase : public BufferWithOwnMemory { protected: Poco::Net::Socket & socket; @@ -25,16 +25,29 @@ protected: bool nextImpl() override; public: - explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); - explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketBase(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); bool poll(size_t timeout_microseconds) const; void setAsyncCallback(AsyncCallback async_callback_) { async_callback = std::move(async_callback_); } + ssize_t socketReceiveBytesImpl(char * ptr, size_t size); + private: AsyncCallback async_callback; std::string socket_description; }; +class ReadBufferFromPocoSocket : public ReadBufferFromPocoSocketBase +{ +public: + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, buf_size) + {} + explicit ReadBufferFromPocoSocket(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE) + : ReadBufferFromPocoSocketBase(socket_, read_event_, buf_size) + {} +}; + } diff --git a/src/IO/ReadBufferFromPocoSocketChunked.cpp b/src/IO/ReadBufferFromPocoSocketChunked.cpp new file mode 100644 index 00000000000..4a1e3732a55 --- /dev/null +++ b/src/IO/ReadBufferFromPocoSocketChunked.cpp @@ -0,0 +1,166 @@ +#include +#include +#include + + +namespace DB::ErrorCodes +{ + extern const int LOGICAL_ERROR; +} + +namespace DB +{ + +ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) + : ReadBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) +{} + +ReadBufferFromPocoSocketChunked::ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size) + : ReadBufferFromPocoSocketBase( + socket_, read_event_, + std::min(buf_size, static_cast(std::numeric_limits::max()))), + our_address(socket_.address()), log(getLogger("Protocol")) +{} + +void ReadBufferFromPocoSocketChunked::enableChunked() +{ + if (chunked) + return; + chunked = 1; + data_end = buffer().end(); + /// Resize working buffer so any next read will call nextImpl + working_buffer.resize(offset()); + chunk_left = 0; + next_chunk = 0; +} + +bool ReadBufferFromPocoSocketChunked::hasBufferedData() const +{ + if (available()) + return true; + + return chunked && (static_cast(data_end - working_buffer.end()) > sizeof(next_chunk)); +} + +bool ReadBufferFromPocoSocketChunked::poll(size_t timeout_microseconds) const +{ + if (chunked) + if (available() || static_cast(data_end - working_buffer.end()) > sizeof(next_chunk)) + return true; + + return ReadBufferFromPocoSocketBase::poll(timeout_microseconds); +} + + +bool ReadBufferFromPocoSocketChunked::loadNextChunk(Position c_pos, bool cont) +{ + auto buffered = std::min(static_cast(data_end - c_pos), sizeof(next_chunk)); + + if (buffered) + std::memcpy(&next_chunk, c_pos, buffered); + if (buffered < sizeof(next_chunk)) + if (socketReceiveBytesImpl(reinterpret_cast(&next_chunk) + buffered, sizeof(next_chunk) - buffered) < static_cast(sizeof(next_chunk) - 
buffered)) + return false; + next_chunk = fromLittleEndian(next_chunk); + + if (next_chunk) + { + if (cont) + LOG_TEST(log, "{} <- {} Chunk receive continued. Size {}", ourAddress().toString(), peerAddress().toString(), next_chunk); + } + else + LOG_TEST(log, "{} <- {} Chunk receive ended.", ourAddress().toString(), peerAddress().toString()); + + return true; +} + +bool ReadBufferFromPocoSocketChunked::processChunkLeft(Position c_pos) +{ + if (data_end - c_pos < chunk_left) + { + working_buffer.resize(data_end - buffer().begin()); + nextimpl_working_buffer_offset = c_pos - buffer().begin(); + chunk_left -= (data_end - c_pos); + return true; + } + + nextimpl_working_buffer_offset = c_pos - buffer().begin(); + working_buffer.resize(nextimpl_working_buffer_offset + chunk_left); + + c_pos += chunk_left; + + if (!loadNextChunk(c_pos, true)) + return false; + + chunk_left = 0; + return true; +} + + +bool ReadBufferFromPocoSocketChunked::nextImpl() +{ + if (!chunked) + return ReadBufferFromPocoSocketBase::nextImpl(); + + auto * c_pos = pos; + + if (chunk_left == 0) + { + if (next_chunk == 0) + { + if (chunked == 1) + chunked = 2; // first chunked block - no end marker + else + c_pos = pos + sizeof(next_chunk); // bypass chunk end marker + + if (c_pos > data_end) + c_pos = data_end; + + if (!loadNextChunk(c_pos)) + return false; + + chunk_left = next_chunk; + next_chunk = 0; + + if (chunk_left == 0) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Native protocol: empty chunk received"); + + c_pos += sizeof(next_chunk); + + if (c_pos >= data_end) + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + LOG_TEST(log, "{} <- {} Chunk receive started. Message {}, size {}", ourAddress().toString(), peerAddress().toString(), static_cast(*c_pos), chunk_left); + } + else + { + c_pos += sizeof(next_chunk); + if (c_pos >= data_end) + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + chunk_left = next_chunk; + next_chunk = 0; + } + } + else + { + if (!ReadBufferFromPocoSocketBase::nextImpl()) + return false; + data_end = buffer().end(); + c_pos = buffer().begin(); + } + + return processChunkLeft(c_pos); +} + +} diff --git a/src/IO/ReadBufferFromPocoSocketChunked.h b/src/IO/ReadBufferFromPocoSocketChunked.h new file mode 100644 index 00000000000..8bc4024b978 --- /dev/null +++ b/src/IO/ReadBufferFromPocoSocketChunked.h @@ -0,0 +1,109 @@ +#pragma once + +#include +#include + +/* + +Handshake +============= + | 'Hello' type + | handshake exchange + | chunked protocol negotiation + +============= + + +Basic chunk: + +============= +Chunk begins | 0x12345678 chunk size, 4 bytes little endian + +------------- + | Packet type always follows beginning of the chunk + | packet data + +------------- +Chunk ends | 0x00000000 4 zero bytes + +============= + + +Datastream chunk: + +============= +Chunk begins | 0x12345678 + +------------- + | Packet type + | packet data + +------------- + | Packet type + | packet data + +------------- +...arbitrary number ..... +of packets... ..... 
+ +------------- + | Packet type + | packet data + +------------- +Chunk ends | 0x00000000 + +============= + + +Multipart chunk: + +============= +Chunk begins | 0x12345678 chunk part size, 4 bytes little endian + +------------- + | Packet type + | packet data + +------------- + | Packet type + | (partial) packet data + +============= +Chunk continues | 0x12345678 chunk next part size, 4 bytes little endian + +============= + | possibly previous packet's data + +------------- + | Packet type + | packet data + +------------- +...arbitrary number ..... +of chunk parts... ..... + +------------- + | Packet type + | packet data + +------------- +Chunk ends | 0x00000000 + +============= + +*/ + +namespace DB +{ + +class ReadBufferFromPocoSocketChunked: public ReadBufferFromPocoSocketBase +{ +public: + using ReadBufferFromPocoSocketBase::setAsyncCallback; + + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit ReadBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & read_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + + void enableChunked(); + + bool hasBufferedData() const; + + bool poll(size_t timeout_microseconds) const; + + Poco::Net::SocketAddress peerAddress() { return peer_address; } + Poco::Net::SocketAddress ourAddress() { return our_address; } + +protected: + bool loadNextChunk(Position c_pos, bool cont = false); + bool processChunkLeft(Position c_pos); + bool nextImpl() override; + + Poco::Net::SocketAddress our_address; + +private: + LoggerPtr log; + Position data_end = nullptr; // end position of data in the internal_buffer + UInt32 chunk_left = 0; // chunk left to read from socket + UInt32 next_chunk = 0; // size of the next cnunk + UInt8 chunked = 0; // 0 - disabled; 1 - started; 2 - enabled; +}; + +} diff --git a/src/IO/S3/Client.cpp b/src/IO/S3/Client.cpp index a966e370ca1..8338a235387 100644 --- a/src/IO/S3/Client.cpp +++ b/src/IO/S3/Client.cpp @@ -46,7 +46,7 @@ namespace ProfileEvents namespace CurrentMetrics { - extern const Metric S3DiskNoKeyErrors; + extern const Metric DiskS3NoSuchKeyErrors; } namespace DB @@ -701,7 +701,7 @@ RequestResult Client::processRequestResult(RequestResult && outcome) const return std::forward(outcome); if (outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY) - CurrentMetrics::add(CurrentMetrics::S3DiskNoKeyErrors); + CurrentMetrics::add(CurrentMetrics::DiskS3NoSuchKeyErrors); String enriched_message = fmt::format( "{} {}", diff --git a/src/IO/S3Common.cpp b/src/IO/S3Common.cpp index a794cdbcf05..59040bf1fea 100644 --- a/src/IO/S3Common.cpp +++ b/src/IO/S3Common.cpp @@ -294,21 +294,21 @@ void RequestSettings::finishInit(const DB::Settings & settings, bool validate_se /// to avoid losing token bucket state on every config reload, /// which could lead to exceeding limit for short time. /// But it is good enough unless very high `burst` values are used. - if (UInt64 max_get_rps = isChanged("max_get_rps") ? get("max_get_rps").get() : settings.s3_max_get_rps) + if (UInt64 max_get_rps = isChanged("max_get_rps") ? get("max_get_rps").safeGet() : settings.s3_max_get_rps) { size_t default_max_get_burst = settings.s3_max_get_burst ? settings.s3_max_get_burst : (Throttler::default_burst_seconds * max_get_rps); - size_t max_get_burst = isChanged("max_get_burts") ? get("max_get_burst").get() : default_max_get_burst; + size_t max_get_burst = isChanged("max_get_burts") ? 
get("max_get_burst").safeGet() : default_max_get_burst; get_request_throttler = std::make_shared(max_get_rps, max_get_burst); } - if (UInt64 max_put_rps = isChanged("max_put_rps") ? get("max_put_rps").get() : settings.s3_max_put_rps) + if (UInt64 max_put_rps = isChanged("max_put_rps") ? get("max_put_rps").safeGet() : settings.s3_max_put_rps) { size_t default_max_put_burst = settings.s3_max_put_burst ? settings.s3_max_put_burst : (Throttler::default_burst_seconds * max_put_rps); - size_t max_put_burst = isChanged("max_put_burts") ? get("max_put_burst").get() : default_max_put_burst; + size_t max_put_burst = isChanged("max_put_burts") ? get("max_put_burst").safeGet() : default_max_put_burst; put_request_throttler = std::make_shared(max_put_rps, max_put_burst); } } diff --git a/src/IO/WriteBuffer.h b/src/IO/WriteBuffer.h index 4759f96a235..84b1079b824 100644 --- a/src/IO/WriteBuffer.h +++ b/src/IO/WriteBuffer.h @@ -64,7 +64,8 @@ public: } bytes += bytes_in_buffer; - pos = working_buffer.begin(); + pos = working_buffer.begin() + nextimpl_working_buffer_offset; + nextimpl_working_buffer_offset = 0; } /// Calling finalize() in the destructor of derived classes is a bad practice. @@ -164,6 +165,11 @@ protected: bool finalized = false; bool canceled = false; + /// The number of bytes to preserve from the initial position of `working_buffer` + /// buffer. Apparently this is an additional out-parameter for nextImpl(), + /// not a real field. + size_t nextimpl_working_buffer_offset = 0; + private: /** Write the data in the buffer (from the beginning of the buffer to the current position). * Throw an exception if something is wrong. diff --git a/src/IO/WriteBufferFromPocoSocket.cpp b/src/IO/WriteBufferFromPocoSocket.cpp index 5ed4dbdc787..ffb38a384a0 100644 --- a/src/IO/WriteBufferFromPocoSocket.cpp +++ b/src/IO/WriteBufferFromPocoSocket.cpp @@ -183,6 +183,7 @@ WriteBufferFromPocoSocket::WriteBufferFromPocoSocket(Poco::Net::Socket & socket_ , socket(socket_) , peer_address(socket.peerAddress()) , our_address(socket.address()) + , write_event(ProfileEvents::end()) , socket_description("socket (" + peer_address.toString() + ")") { } diff --git a/src/IO/WriteBufferFromPocoSocketChunked.cpp b/src/IO/WriteBufferFromPocoSocketChunked.cpp new file mode 100644 index 00000000000..9da46ee2d10 --- /dev/null +++ b/src/IO/WriteBufferFromPocoSocketChunked.cpp @@ -0,0 +1,210 @@ +#include +#include +#include + + +namespace +{ + +template +void setValue(T * typed_ptr, std::type_identity_t val) +{ + memcpy(static_cast(typed_ptr), &val, sizeof(T)); +} + +} + +namespace DB +{ + +WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size) + : WriteBufferFromPocoSocketChunked(socket_, ProfileEvents::end(), buf_size) +{} + +WriteBufferFromPocoSocketChunked::WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size) + : WriteBufferFromPocoSocket( + socket_, write_event_, + std::clamp(buf_size, sizeof(*chunk_size_ptr) + 1, static_cast(std::numeric_limits>::max()))), + log(getLogger("Protocol")) +{} + +void WriteBufferFromPocoSocketChunked::enableChunked() +{ + chunked = true; + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + /// Pretend finishChunk() was just called to prevent sending empty chunk if finishChunk() called immediately + last_finish_chunk = chunk_size_ptr; +} + +void WriteBufferFromPocoSocketChunked::finishChunk() +{ + if 
(!chunked) + return; + + if (pos <= reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr)) + { + /// Prevent duplicate finish chunk (and finish chunk right after enableChunked()) + if (chunk_size_ptr == last_finish_chunk) + return; + + /// If current chunk is empty it means we are finishing a chunk previously sent by next(), + /// we want to convert current chunk header into end-of-chunk marker and initialize next chunk. + /// We don't need to worry about if it's the end of the buffer because next() always sends the whole buffer + /// so it should be a beginning of the buffer. + + chassert(reinterpret_cast(chunk_size_ptr) == working_buffer.begin()); + + setValue(chunk_size_ptr, 0); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Previously finished chunk wasn't sent yet + if (last_finish_chunk == chunk_size_ptr) + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + } + + /// Fill up current chunk size + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); + + if (!chunk_started) + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + else + { + chunk_started = false; + LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + + LOG_TEST(log, "{} -> {} Chunk send ended.", ourAddress().toString(), peerAddress().toString()); + + if (available() < sizeof(*chunk_size_ptr)) + { + finishing = available(); + pos += available(); + chunk_size_ptr = reinterpret_cast(pos); + last_finish_chunk = chunk_size_ptr; + return; + } + + /// Buffer end-of-chunk + setValue(reinterpret_cast(pos), 0); + pos += sizeof(*chunk_size_ptr); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(pos); + pos += std::min(available(), sizeof(*chunk_size_ptr)); + + last_finish_chunk = chunk_size_ptr; +} + +WriteBufferFromPocoSocketChunked::~WriteBufferFromPocoSocketChunked() +{ + try + { + finalize(); + } + catch (...) 
+ { + tryLogCurrentException(__PRETTY_FUNCTION__); + } +} + +void WriteBufferFromPocoSocketChunked::nextImpl() +{ + if (!chunked) + { + WriteBufferFromPocoSocket::nextImpl(); + return; + } + + /// next() after finishChunk at the end of the buffer + if (finishing < sizeof(*chunk_size_ptr)) + { + pos -= finishing; + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Send end-of-chunk directly + UInt32 s = 0; + socketSendBytes(reinterpret_cast(&s), sizeof(s)); + + finishing = sizeof(*chunk_size_ptr); + + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = chunk_size_ptr; + + return; + } + + /// Prevent sending empty chunk + if (offset() == sizeof(*chunk_size_ptr)) + { + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + return; + } + + /// Finish chunk at the end of the buffer + if (working_buffer.end() - reinterpret_cast(chunk_size_ptr) <= static_cast(sizeof(*chunk_size_ptr))) + { + pos = reinterpret_cast(chunk_size_ptr); + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = nullptr; + + return; + } + + bool initialize_last_finish_chunk = false; + if (pos - reinterpret_cast(chunk_size_ptr) == sizeof(*chunk_size_ptr)) // next() after finishChunk + { + pos -= sizeof(*chunk_size_ptr); + initialize_last_finish_chunk = true; + } + else // fill up current chunk size + { + setValue(chunk_size_ptr, toLittleEndian(static_cast(pos - reinterpret_cast(chunk_size_ptr) - sizeof(*chunk_size_ptr)))); + if (!chunk_started) + { + chunk_started = true; + LOG_TEST(log, "{} -> {} Chunk send started. Message {}, size {}", + ourAddress().toString(), peerAddress().toString(), + static_cast(*(reinterpret_cast(chunk_size_ptr) + sizeof(*chunk_size_ptr))), + *chunk_size_ptr); + } + else + LOG_TEST(log, "{} -> {} Chunk send continued. Size {}", ourAddress().toString(), peerAddress().toString(), *chunk_size_ptr); + } + /// Send current chunk + WriteBufferFromPocoSocket::nextImpl(); + /// Initialize next chunk + chunk_size_ptr = reinterpret_cast(working_buffer.begin()); + nextimpl_working_buffer_offset = sizeof(*chunk_size_ptr); + + last_finish_chunk = initialize_last_finish_chunk ? 
chunk_size_ptr : nullptr; +} + +void WriteBufferFromPocoSocketChunked::finalizeImpl() +{ + if (chunked && offset() == sizeof(*chunk_size_ptr)) + pos -= sizeof(*chunk_size_ptr); + WriteBufferFromPocoSocket::finalizeImpl(); +} + +} diff --git a/src/IO/WriteBufferFromPocoSocketChunked.h b/src/IO/WriteBufferFromPocoSocketChunked.h new file mode 100644 index 00000000000..13a277e3bfb --- /dev/null +++ b/src/IO/WriteBufferFromPocoSocketChunked.h @@ -0,0 +1,36 @@ +#pragma once + +#include +#include +#include + + +namespace DB +{ + +class WriteBufferFromPocoSocketChunked: public WriteBufferFromPocoSocket +{ +public: + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + explicit WriteBufferFromPocoSocketChunked(Poco::Net::Socket & socket_, const ProfileEvents::Event & write_event_, size_t buf_size = DBMS_DEFAULT_BUFFER_SIZE); + + void enableChunked(); + void finishChunk(); + ~WriteBufferFromPocoSocketChunked() override; + +protected: + void nextImpl() override; + void finalizeImpl() override; + Poco::Net::SocketAddress peerAddress() const { return peer_address; } + Poco::Net::SocketAddress ourAddress() const { return our_address; } + +private: + LoggerPtr log; + bool chunked = false; + UInt32 * last_finish_chunk = nullptr; // pointer to the last chunk header created by finishChunk + bool chunk_started = false; // chunk started flag + UInt32 * chunk_size_ptr = nullptr; // pointer to the chunk size holder in the buffer + size_t finishing = sizeof(*chunk_size_ptr); // indicates not enough buffer for end-of-chunk marker +}; + +} diff --git a/src/Interpreters/ActionsDAG.cpp b/src/Interpreters/ActionsDAG.cpp index df1c0aa1f2a..2a594839c6a 100644 --- a/src/Interpreters/ActionsDAG.cpp +++ b/src/Interpreters/ActionsDAG.cpp @@ -301,11 +301,11 @@ const ActionsDAG::Node & ActionsDAG::addCast(const Node & node_to_cast, const Da column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); column.type = std::make_shared(); - const auto * cast_type_constant_node = &addColumn(std::move(column)); + const auto * cast_type_constant_node = &addColumn(column); ActionsDAG::NodeRawConstPtrs children = {&node_to_cast, cast_type_constant_node}; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); + auto func_base_cast = createInternalCast(ColumnWithTypeAndName{node_to_cast.result_type, node_to_cast.result_name}, cast_type, CastType::nonAccurate, {}); - return addFunction(func_builder_cast, std::move(children), result_name); + return addFunction(func_base_cast, std::move(children), result_name); } const ActionsDAG::Node & ActionsDAG::addFunctionImpl( @@ -1547,11 +1547,11 @@ ActionsDAG ActionsDAG::makeConvertingActions( const auto * left_arg = dst_node; CastDiagnostic diagnostic = {dst_node->result_name, res_elem.name}; - FunctionOverloadResolverPtr func_builder_cast - = createInternalCastOverloadResolver(CastType::nonAccurate, std::move(diagnostic)); + ColumnWithTypeAndName left_column{nullptr, dst_node->result_type, {}}; + auto func_base_cast = createInternalCast(std::move(left_column), res_elem.type, CastType::nonAccurate, std::move(diagnostic)); NodeRawConstPtrs children = { left_arg, right_arg }; - dst_node = &actions_dag.addFunction(func_builder_cast, std::move(children), {}); + dst_node = &actions_dag.addFunction(func_base_cast, std::move(children), {}); } if (dst_node->column && isColumnConst(*dst_node->column) && !(res_elem.column && isColumnConst(*res_elem.column))) diff 
--git a/src/Interpreters/ActionsVisitor.cpp b/src/Interpreters/ActionsVisitor.cpp index e1b7e92ee5d..368eb8174f0 100644 --- a/src/Interpreters/ActionsVisitor.cpp +++ b/src/Interpreters/ActionsVisitor.cpp @@ -131,7 +131,7 @@ static Block createBlockFromCollection(const Collection & collection, const Data throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, "Invalid type in set. Expected tuple, got {}", String(value.getTypeName())); - const auto & tuple = value.template get(); + const auto & tuple = value.template safeGet(); size_t tuple_size = tuple.size(); if (tuple_size != columns_num) throw Exception(ErrorCodes::INCORRECT_ELEMENT_OF_SET, "Incorrect size of tuple in set: {} instead of {}", @@ -233,7 +233,7 @@ static Block createBlockFromAST(const ASTPtr & node, const DataTypes & types, Co "Invalid type of set. Expected tuple, got {}", function_result.getTypeName()); - tuple = &function_result.get(); + tuple = &function_result.safeGet(); } /// Tuple can be represented as a literal in AST. @@ -246,7 +246,7 @@ static Block createBlockFromAST(const ASTPtr & node, const DataTypes & types, Co "Invalid type in set. Expected tuple, got {}", literal->value.getTypeName()); - tuple = &literal->value.get(); + tuple = &literal->value.safeGet(); } assert(tuple || func); @@ -332,14 +332,14 @@ Block createBlockForSet( if (type_index == TypeIndex::Tuple) { const DataTypes & value_types = assert_cast(right_arg_type.get())->getElements(); - block = createBlockFromCollection(right_arg_value.get(), value_types, set_element_types, tranform_null_in); + block = createBlockFromCollection(right_arg_value.safeGet(), value_types, set_element_types, tranform_null_in); } else if (type_index == TypeIndex::Array) { const auto* right_arg_array_type = assert_cast(right_arg_type.get()); - size_t right_arg_array_size = right_arg_value.get().size(); + size_t right_arg_array_size = right_arg_value.safeGet().size(); DataTypes value_types(right_arg_array_size, right_arg_array_type->getNestedType()); - block = createBlockFromCollection(right_arg_value.get(), value_types, set_element_types, tranform_null_in); + block = createBlockFromCollection(right_arg_value.safeGet(), value_types, set_element_types, tranform_null_in); } else throw_unsupported_type(right_arg_type); diff --git a/src/Interpreters/AddDefaultDatabaseVisitor.h b/src/Interpreters/AddDefaultDatabaseVisitor.h index a28c7c1bff3..d59fd35df77 100644 --- a/src/Interpreters/AddDefaultDatabaseVisitor.h +++ b/src/Interpreters/AddDefaultDatabaseVisitor.h @@ -210,7 +210,7 @@ private: if (literal_value.getType() != Field::Types::String) continue; - auto dictionary_name = literal_value.get(); + auto dictionary_name = literal_value.safeGet(); auto qualified_dictionary_name = context->getExternalDictionariesLoader().qualifyDictionaryNameWithDatabase(dictionary_name, context); literal_value = qualified_dictionary_name.getFullName(); } diff --git a/src/Interpreters/Cache/QueryCache.cpp b/src/Interpreters/Cache/QueryCache.cpp index a3fe8c2e779..ab926037c67 100644 --- a/src/Interpreters/Cache/QueryCache.cpp +++ b/src/Interpreters/Cache/QueryCache.cpp @@ -128,7 +128,7 @@ namespace bool isQueryCacheRelatedSetting(const String & setting_name) { - return setting_name.starts_with("query_cache_") || setting_name.ends_with("_query_cache"); + return (setting_name.starts_with("query_cache_") || setting_name.ends_with("_query_cache")) && setting_name != "query_cache_tag"; } class RemoveQueryCacheSettingsMatcher @@ -242,11 +242,18 @@ QueryCache::Key::Key( , expires_at(expires_at_) , 
is_compressed(is_compressed_) , query_string(queryStringFromAST(ast_)) + , tag(settings.query_cache_tag) { } -QueryCache::Key::Key(ASTPtr ast_, const String & current_database, const Settings & settings, std::optional user_id_, const std::vector & current_user_roles_) - : QueryCache::Key(ast_, current_database, settings, {}, user_id_, current_user_roles_, false, std::chrono::system_clock::from_time_t(1), false) /// dummy values for everything != AST, current database, user name/roles +QueryCache::Key::Key( + ASTPtr ast_, + const String & current_database, + const Settings & settings, + std::optional user_id_, + const std::vector & current_user_roles_) + : QueryCache::Key(ast_, current_database, settings, {}, user_id_, current_user_roles_, false, std::chrono::system_clock::from_time_t(1), false) + /// ^^ dummy values for everything != AST, current database, user name/roles { } diff --git a/src/Interpreters/Cache/QueryCache.h b/src/Interpreters/Cache/QueryCache.h index 461197cac32..c7ebaf4d26a 100644 --- a/src/Interpreters/Cache/QueryCache.h +++ b/src/Interpreters/Cache/QueryCache.h @@ -88,6 +88,11 @@ public: /// SYSTEM.QUERY_CACHE. const String query_string; + /// A tag (namespace) for distinguish multiple entries of the same query. + /// This member has currently no use besides that SYSTEM.QUERY_CACHE can populate the 'tag' column conveniently without having to + /// compute the tag from the query AST. + const String tag; + /// Ctor to construct a Key for writing into query cache. Key(ASTPtr ast_, const String & current_database, @@ -99,7 +104,10 @@ public: bool is_compressed); /// Ctor to construct a Key for reading from query cache (this operation only needs the AST + user name). - Key(ASTPtr ast_, const String & current_database, const Settings & settings, std::optional user_id_, const std::vector & current_user_roles_); + Key(ASTPtr ast_, + const String & current_database, + const Settings & settings, + std::optional user_id_, const std::vector & current_user_roles_); bool operator==(const Key & other) const; }; diff --git a/src/Interpreters/Cluster.cpp b/src/Interpreters/Cluster.cpp index 59c98491c14..dd9e35834eb 100644 --- a/src/Interpreters/Cluster.cpp +++ b/src/Interpreters/Cluster.cpp @@ -113,6 +113,9 @@ Cluster::Address::Address( secure = ConfigHelper::getBool(config, config_prefix + ".secure", false, /* empty_as */true) ? Protocol::Secure::Enable : Protocol::Secure::Disable; priority = Priority{config.getInt(config_prefix + ".priority", 1)}; + proto_send_chunked = config.getString(config_prefix + ".proto_caps.send", "notchunked"); + proto_recv_chunked = config.getString(config_prefix + ".proto_caps.recv", "notchunked"); + const char * port_type = secure == Protocol::Secure::Enable ? 
"tcp_port_secure" : "tcp_port"; auto default_port = config.getInt(port_type, 0); @@ -425,7 +428,9 @@ Cluster::Cluster(const Poco::Util::AbstractConfiguration & config, auto pool = ConnectionPoolFactory::instance().get( static_cast(settings.distributed_connections_pool_size), address.host_name, address.port, - address.default_database, address.user, address.password, address.quota_key, + address.default_database, address.user, address.password, + address.proto_send_chunked, address.proto_recv_chunked, + address.quota_key, address.cluster, address.cluster_secret, "server", address.compression, address.secure, address.priority); @@ -589,6 +594,8 @@ void Cluster::addShard( replica.default_database, replica.user, replica.password, + replica.proto_send_chunked, + replica.proto_recv_chunked, replica.quota_key, replica.cluster, replica.cluster_secret, @@ -744,6 +751,8 @@ Cluster::Cluster(Cluster::ReplicasAsShardsTag, const Cluster & from, const Setti address.default_database, address.user, address.password, + address.proto_send_chunked, + address.proto_recv_chunked, address.quota_key, address.cluster, address.cluster_secret, diff --git a/src/Interpreters/Cluster.h b/src/Interpreters/Cluster.h index dc5790ac339..c69d77668ab 100644 --- a/src/Interpreters/Cluster.h +++ b/src/Interpreters/Cluster.h @@ -114,6 +114,8 @@ public: UInt16 port{0}; String user; String password; + String proto_send_chunked = "notchunked"; + String proto_recv_chunked = "notchunked"; String quota_key; /// For inter-server authorization diff --git a/src/Interpreters/ComparisonTupleEliminationVisitor.cpp b/src/Interpreters/ComparisonTupleEliminationVisitor.cpp index 4f06f345b96..b9f7f37b338 100644 --- a/src/Interpreters/ComparisonTupleEliminationVisitor.cpp +++ b/src/Interpreters/ComparisonTupleEliminationVisitor.cpp @@ -22,7 +22,7 @@ ASTs splitTuple(const ASTPtr & node) if (const auto * literal = node->as(); literal && literal->value.getType() == Field::Types::Tuple) { ASTs result; - const auto & tuple = literal->value.get(); + const auto & tuple = literal->value.safeGet(); for (const auto & child : tuple) result.emplace_back(std::make_shared(child)); return result; diff --git a/src/Interpreters/Context.cpp b/src/Interpreters/Context.cpp index 5413b568068..4a08fd5fe5b 100644 --- a/src/Interpreters/Context.cpp +++ b/src/Interpreters/Context.cpp @@ -99,6 +99,7 @@ #include #include #include +#include #include #include #include @@ -618,7 +619,7 @@ struct ContextSharedPart : boost::noncopyable /** After system_logs have been shut down it is guaranteed that no system table gets created or written to. * Note that part changes at shutdown won't be logged to part log. 
*/ - SHUTDOWN(log, "system logs", system_logs, shutdown()); + SHUTDOWN(log, "system logs", system_logs, flushAndShutdown()); LOG_TRACE(log, "Shutting down database catalog"); DatabaseCatalog::shutdown(); @@ -4255,7 +4256,7 @@ std::shared_ptr Context::getS3QueueLog() const if (!shared->system_logs) return {}; - return shared->system_logs->s3_queue_log; + return shared->system_logs->s3queue_log; } std::shared_ptr Context::getAzureQueueLog() const @@ -4312,13 +4313,13 @@ std::shared_ptr Context::getBlobStorageLog() const return shared->system_logs->blob_storage_log; } -std::vector Context::getSystemLogs() const +SystemLogs Context::getSystemLogs() const { SharedLockGuard lock(shared->mutex); if (!shared->system_logs) return {}; - return shared->system_logs->logs; + return *shared->system_logs; } std::optional Context::getDashboards() const diff --git a/src/Interpreters/Context.h b/src/Interpreters/Context.h index d5e35c3e4b3..3da4f124553 100644 --- a/src/Interpreters/Context.h +++ b/src/Interpreters/Context.h @@ -48,6 +48,8 @@ namespace DB class ASTSelectQuery; +class SystemLogs; + struct ContextSharedPart; class ContextAccess; class ContextAccessWrapper; @@ -1150,7 +1152,7 @@ public: std::shared_ptr getBackupLog() const; std::shared_ptr getBlobStorageLog() const; - std::vector getSystemLogs() const; + SystemLogs getSystemLogs() const; using Dashboards = std::vector>; std::optional getDashboards() const; diff --git a/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp b/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp index 084bb0a1bb9..220355e0741 100644 --- a/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp +++ b/src/Interpreters/ConvertFunctionOrLikeVisitor.cpp @@ -45,7 +45,7 @@ void ConvertFunctionOrLikeData::visit(ASTFunction & function, ASTPtr &) if (!identifier || !literal || literal->value.getType() != Field::Types::String) continue; - String regexp = likePatternToRegexp(literal->value.get()); + String regexp = likePatternToRegexp(literal->value.safeGet()); /// Case insensitive. Works with UTF-8 as well. 
if (is_ilike) regexp = "(?i)" + regexp; @@ -61,7 +61,7 @@ void ConvertFunctionOrLikeData::visit(ASTFunction & function, ASTPtr &) match->arguments->children.push_back(it->second); unique_elems.push_back(std::move(match)); } - it->second->value.get().push_back(regexp); + it->second->value.safeGet().push_back(regexp); } } diff --git a/src/Interpreters/ConvertStringsToEnumVisitor.cpp b/src/Interpreters/ConvertStringsToEnumVisitor.cpp index 7cc95dc521b..d35baa92900 100644 --- a/src/Interpreters/ConvertStringsToEnumVisitor.cpp +++ b/src/Interpreters/ConvertStringsToEnumVisitor.cpp @@ -33,8 +33,8 @@ String makeStringsEnum(const std::set & values) void changeIfArguments(ASTPtr & first, ASTPtr & second) { - String first_value = first->as()->value.get(); - String second_value = second->as()->value.get(); + String first_value = first->as()->value.safeGet(); + String second_value = second->as()->value.safeGet(); std::set values; values.insert(first_value); @@ -59,9 +59,9 @@ void changeTransformArguments(ASTPtr & array_to, ASTPtr & other) { std::set values; - for (const auto & item : array_to->as()->value.get()) - values.insert(item.get()); - values.insert(other->as()->value.get()); + for (const auto & item : array_to->as()->value.safeGet()) + values.insert(item.safeGet()); + values.insert(other->as()->value.safeGet()); String enum_string = makeStringsEnum(values); @@ -168,7 +168,7 @@ void ConvertStringsToEnumMatcher::visit(ASTFunction & function_node, Data & data if (literal_to->value.getTypeName() != "Array" || literal_other->value.getTypeName() != "String") return; - Array array_to = literal_to->value.get(); + Array array_to = literal_to->value.safeGet(); if (array_to.empty()) return; diff --git a/src/Interpreters/DDLTask.cpp b/src/Interpreters/DDLTask.cpp index fa197d59c13..6e08dd5e2cc 100644 --- a/src/Interpreters/DDLTask.cpp +++ b/src/Interpreters/DDLTask.cpp @@ -538,7 +538,7 @@ void DatabaseReplicatedTask::createSyncedNodeIfNeed(const ZooKeeperPtr & zookeep /// Bool type is really weird, sometimes it's Bool and sometimes it's UInt64... 
assert(value.getType() == Field::Types::Bool || value.getType() == Field::Types::UInt64); - if (!value.get()) + if (!value.safeGet()) return; zookeeper->createIfNotExists(getSyncedNodePath(), ""); diff --git a/src/Interpreters/HashJoin/FullHashJoin.cpp b/src/Interpreters/HashJoin/FullHashJoin.cpp index 5d058d10fc2..4cdb2e757a4 100644 --- a/src/Interpreters/HashJoin/FullHashJoin.cpp +++ b/src/Interpreters/HashJoin/FullHashJoin.cpp @@ -1,4 +1,4 @@ -#include +#include namespace DB { diff --git a/src/Interpreters/HashJoin/HashJoin.cpp b/src/Interpreters/HashJoin/HashJoin.cpp index a621ce16fb1..dd7d42de63e 100644 --- a/src/Interpreters/HashJoin/HashJoin.cpp +++ b/src/Interpreters/HashJoin/HashJoin.cpp @@ -291,12 +291,13 @@ void HashJoin::dataMapInit(MapsVariant & map) { if (kind == JoinKind::Cross) return; - joinDispatchInit(kind, strictness, map); - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { map_.create(data->type); }); + auto prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; + joinDispatchInit(kind, strictness, map, prefer_use_maps_all); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { map_.create(data->type); }); if (reserve_num) { - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { map_.reserve(data->type, reserve_num); }); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { map_.reserve(data->type, reserve_num); }); } if (!data) @@ -327,9 +328,10 @@ size_t HashJoin::getTotalRowCount() const } else { + auto prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; for (const auto & map : data->maps) { - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { res += map_.getTotalRowCount(data->type); }); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { res += map_.getTotalRowCount(data->type); }); } } @@ -367,9 +369,10 @@ size_t HashJoin::getTotalByteCount() const if (data->type != Type::CROSS) { + auto prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; for (const auto & map : data->maps) { - joinDispatch(kind, strictness, map, [&](auto, auto, auto & map_) { res += map_.getTotalByteCountImpl(data->type); }); + joinDispatch(kind, strictness, map, prefer_use_maps_all, [&](auto, auto, auto & map_) { res += map_.getTotalByteCountImpl(data->type); }); } } return res; @@ -520,6 +523,8 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) return true; } + bool prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; + size_t total_rows = 0; size_t total_bytes = 0; { @@ -592,7 +597,7 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) bool is_inserted = false; if (kind != JoinKind::Cross) { - joinDispatch(kind, strictness, data->maps[onexpr_idx], [&](auto kind_, auto strictness_, auto & map) + joinDispatch(kind, strictness, data->maps[onexpr_idx], prefer_use_maps_all, [&](auto kind_, auto strictness_, auto & map) { size_t size = HashJoinMethods>::insertFromBlockImpl( *this, @@ -608,10 +613,10 @@ bool HashJoin::addBlockToJoin(const Block & source_block_, bool check_limits) is_inserted); if (flag_per_row) - used_flags->reinit(stored_block); + used_flags->reinit, MapsAll>>(stored_block); else if (is_inserted) /// Number of buckets + 1 value from zero storage - used_flags->reinit(size + 1); + used_flags->reinit, MapsAll>>(size + 1); }); } @@ -873,7 +878,7 @@ ColumnWithTypeAndName HashJoin::joinGet(const Block & 
block, const Block & block keys.insert(std::move(key)); } - static_assert(!MapGetter::flagged, + static_assert(!MapGetter::flagged, "joinGet are not protected from hash table changes between block processing"); std::vector maps_vector; @@ -914,16 +919,34 @@ void HashJoin::joinBlock(Block & block, ExtraBlockPtr & not_processed) materializeBlockInplace(block); } + bool prefer_use_maps_all = table_join->getMixedJoinExpression() != nullptr; { std::vectormaps[0])> * > maps_vector; for (size_t i = 0; i < table_join->getClauses().size(); ++i) maps_vector.push_back(&data->maps[i]); - if (joinDispatch(kind, strictness, maps_vector, [&](auto kind_, auto strictness_, auto & maps_vector_) + if (joinDispatch(kind, strictness, maps_vector, prefer_use_maps_all, [&](auto kind_, auto strictness_, auto & maps_vector_) { - using MapType = typename MapGetter::Map; - Block remaining_block = HashJoinMethods::joinBlockImpl( - *this, block, sample_block_with_columns_to_add, maps_vector_); + Block remaining_block; + if constexpr (std::is_same_v, std::vector>) + { + remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + } + else if constexpr (std::is_same_v, std::vector>) + { + remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + } + else if constexpr (std::is_same_v, std::vector>) + { + remaining_block = HashJoinMethods::joinBlockImpl( + *this, block, sample_block_with_columns_to_add, maps_vector_); + } + else + { + throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown maps type"); + } if (remaining_block.rows()) not_processed = std::make_shared(ExtraBlock{std::move(remaining_block)}); else @@ -1023,7 +1046,8 @@ public: rows_added = fillColumnsFromMap(map, columns_right); }; - if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps.front(), fill_callback)) + bool prefer_use_maps_all = parent.table_join->getMixedJoinExpression() != nullptr; + if (!joinDispatch(parent.kind, parent.strictness, parent.data->maps.front(), prefer_use_maps_all, fill_callback)) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown JOIN strictness '{}' (must be on of: ANY, ALL, ASOF)", parent.strictness); } @@ -1220,11 +1244,12 @@ void HashJoin::reuseJoinedData(const HashJoin & join) if (flag_per_row) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "StorageJoin with ORs is not supported"); + bool prefer_use_maps_all = join.table_join->getMixedJoinExpression() != nullptr; for (auto & map : data->maps) { - joinDispatch(kind, strictness, map, [this](auto kind_, auto strictness_, auto & map_) + joinDispatch(kind, strictness, map, prefer_use_maps_all, [this](auto kind_, auto strictness_, auto & map_) { - used_flags->reinit(map_.getBufferSizeInCells(data->type) + 1); + used_flags->reinit, MapsAll>>(map_.getBufferSizeInCells(data->type) + 1); }); } } @@ -1304,7 +1329,9 @@ void HashJoin::validateAdditionalFilterExpression(ExpressionActionsPtr additiona additional_filter_expression->dumpActions()); } - bool is_supported = (strictness == JoinStrictness::All) && (isInnerOrLeft(kind) || isRightOrFull(kind)); + bool is_supported = ((strictness == JoinStrictness::All) && (isInnerOrLeft(kind) || isRightOrFull(kind))) + || ((strictness == JoinStrictness::Semi || strictness == JoinStrictness::Any || strictness == JoinStrictness::Anti) + && (isLeft(kind) || isRight(kind))) || (strictness == JoinStrictness::Any && (isInner(kind))); if (!is_supported) { throw Exception(ErrorCodes::INVALID_JOIN_ON_EXPRESSION, diff --git 
a/src/Interpreters/HashJoin/HashJoinMethods.h b/src/Interpreters/HashJoin/HashJoinMethods.h index 0dfafa94efc..3b7a67467e3 100644 --- a/src/Interpreters/HashJoin/HashJoinMethods.h +++ b/src/Interpreters/HashJoin/HashJoinMethods.h @@ -12,15 +12,8 @@ #include #include - namespace DB { -namespace ErrorCodes -{ - extern const int UNSUPPORTED_JOIN_KEYS; - extern const int LOGICAL_ERROR; -} - /// Inserting an element into a hash table of the form `key -> reference to a string`, which will then be used by JOIN. template struct Inserter @@ -64,7 +57,6 @@ struct Inserter } }; - /// MapsTemplate is one of MapsOne, MapsAll and MapsAsof template class HashJoinMethods @@ -81,27 +73,7 @@ public: ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, - bool & is_inserted) - { - switch (type) - { - case HashJoin::Type::EMPTY: - [[fallthrough]]; - case HashJoin::Type::CROSS: - /// Do nothing. We will only save block, and it is enough - is_inserted = true; - return 0; - - #define M(TYPE) \ - case HashJoin::Type::TYPE: \ - return insertFromBlockImplTypeCase>::Type>(\ - join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted); \ - break; - - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - } - } + bool & is_inserted); using MapsTemplateVector = std::vector; @@ -110,280 +82,36 @@ public: Block & block, const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, - bool is_join_get = false) - { - constexpr JoinFeatures join_features; - - std::vector join_on_keys; - const auto & onexprs = join.table_join->getClauses(); - for (size_t i = 0; i < onexprs.size(); ++i) - { - const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; - join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); - } - size_t existing_columns = block.columns(); - - /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. - * Because if they are constants, then in the "not joined" rows, they may have different values - * - default values, which can differ from the values of these constants. - */ - if constexpr (join_features.right || join_features.full) - { - materializeBlockInplace(block); - } - - /** For LEFT/INNER JOIN, the saved blocks do not contain keys. - * For FULL/RIGHT JOIN, the saved blocks contain keys; - * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. 
- * For ASOF, the last column is used as the ASOF column - */ - AddedColumns added_columns( - block, - block_with_columns_to_add, - join.savedBlockSample(), - join, - std::move(join_on_keys), - join.table_join->getMixedJoinExpression(), - join_features.is_asof_join, - is_join_get); - - bool has_required_right_keys = (join.required_right_keys.columns() != 0); - added_columns.need_filter = join_features.need_filter || has_required_right_keys; - added_columns.max_joined_block_rows = join.max_joined_block_rows; - if (!added_columns.max_joined_block_rows) - added_columns.max_joined_block_rows = std::numeric_limits::max(); - else - added_columns.reserve(join_features.need_replication); - - size_t num_joined = switchJoinRightColumns(maps_, added_columns, join.data->type, *join.used_flags); - /// Do not hold memory for join_on_keys anymore - added_columns.join_on_keys.clear(); - Block remaining_block = sliceBlock(block, num_joined); - - added_columns.buildOutput(); - for (size_t i = 0; i < added_columns.size(); ++i) - block.insert(added_columns.moveColumn(i)); - - std::vector right_keys_to_replicate [[maybe_unused]]; - - if constexpr (join_features.need_filter) - { - /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. - for (size_t i = 0; i < existing_columns; ++i) - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(added_columns.filter, -1); - - /// Add join key columns from right block if needed using value from left table because of equality - for (size_t i = 0; i < join.required_right_keys.columns(); ++i) - { - const auto & right_key = join.required_right_keys.getByPosition(i); - /// asof column is already in block. - if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) - continue; - - const auto & left_column = block.getByName(join.required_right_keys_sources[i]); - const auto & right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); - auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); - block.insert(std::move(right_col)); - } - } - else if (has_required_right_keys) - { - /// Add join key columns from right block if needed. - for (size_t i = 0; i < join.required_right_keys.columns(); ++i) - { - const auto & right_key = join.required_right_keys.getByPosition(i); - auto right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); - /// asof column is already in block. - if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) - continue; - - const auto & left_column = block.getByName(join.required_right_keys_sources[i]); - auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); - block.insert(std::move(right_col)); - - if constexpr (join_features.need_replication) - right_keys_to_replicate.push_back(block.getPositionByName(right_col_name)); - } - } - - if constexpr (join_features.need_replication) - { - std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; - - /// If ALL ... JOIN - we replicate all the columns except the new ones. 
- for (size_t i = 0; i < existing_columns; ++i) - { - block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); - } - - /// Replicate additional right keys - for (size_t pos : right_keys_to_replicate) - { - block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); - } - } - - return remaining_block; - } - + bool is_join_get = false); private: template - static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes) - { - if constexpr (is_asof_join) - { - auto key_column_copy = key_columns; - auto key_size_copy = key_sizes; - key_column_copy.pop_back(); - key_size_copy.pop_back(); - return KeyGetter(key_column_copy, key_size_copy, nullptr); - } - else - return KeyGetter(key_columns, key_sizes, nullptr); - } + static KeyGetter createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes); template - static size_t NO_INLINE insertFromBlockImplTypeCase( + static size_t insertFromBlockImplTypeCase( HashJoin & join, HashMap & map, size_t rows, const ColumnRawPtrs & key_columns, - const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted) - { - [[maybe_unused]] constexpr bool mapped_one = std::is_same_v; - constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; - - const IColumn * asof_column [[maybe_unused]] = nullptr; - if constexpr (is_asof_join) - asof_column = key_columns.back(); - - auto key_getter = createKeyGetter(key_columns, key_sizes); - - /// For ALL and ASOF join always insert values - is_inserted = !mapped_one || is_asof_join; - - for (size_t i = 0; i < rows; ++i) - { - if (null_map && (*null_map)[i]) - { - /// nulls are not inserted into hash table, - /// keep them for RIGHT and FULL joins - is_inserted = true; - continue; - } - - /// Check condition for right table from ON section - if (join_mask && !(*join_mask)[i]) - continue; - - if constexpr (is_asof_join) - Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, *asof_column); - else if constexpr (mapped_one) - is_inserted |= Inserter::insertOne(join, map, key_getter, stored_block, i, pool); - else - Inserter::insertAll(join, map, key_getter, stored_block, i, pool); - } - return map.getBufferSizeInCells(); - } + const Sizes & key_sizes, Block * stored_block, ConstNullMapPtr null_map, UInt8ColumnDataPtr join_mask, Arena & pool, bool & is_inserted); template static size_t switchJoinRightColumns( const std::vector & mapv, AddedColumns & added_columns, HashJoin::Type type, - JoinStuff::JoinUsedFlags & used_flags) - { - constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; - switch (type) - { - case HashJoin::Type::EMPTY: { - if constexpr (!is_asof_join) - { - using KeyGetter = KeyGetterEmpty; - std::vector key_getter_vector; - key_getter_vector.emplace_back(); - - using MapTypeVal = typename KeyGetter::MappedType; - std::vector a_map_type_vector; - a_map_type_vector.emplace_back(); - return joinRightColumnsSwitchNullability( - std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); - } - throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys. 
Type: {}", type); - } - #define M(TYPE) \ - case HashJoin::Type::TYPE: \ - { \ - using MapTypeVal = const typename std::remove_reference_t::element_type; \ - using KeyGetter = typename KeyGetterForType::Type; \ - std::vector a_map_type_vector(mapv.size()); \ - std::vector key_getter_vector; \ - for (size_t d = 0; d < added_columns.join_on_keys.size(); ++d) \ - { \ - const auto & join_on_key = added_columns.join_on_keys[d]; \ - a_map_type_vector[d] = mapv[d]->TYPE.get(); \ - key_getter_vector.push_back(std::move(createKeyGetter(join_on_key.key_columns, join_on_key.key_sizes))); \ - } \ - return joinRightColumnsSwitchNullability( \ - std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); \ - } - APPLY_FOR_JOIN_VARIANTS(M) - #undef M - - default: - throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", type); - } - } + JoinStuff::JoinUsedFlags & used_flags); template static size_t joinRightColumnsSwitchNullability( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) - { - if (added_columns.need_filter) - { - return joinRightColumnsSwitchMultipleDisjuncts( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); - } - else - { - return joinRightColumnsSwitchMultipleDisjuncts( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); - } - } + JoinStuff::JoinUsedFlags & used_flags); template static size_t joinRightColumnsSwitchMultipleDisjuncts( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) - { - constexpr JoinFeatures join_features; - if constexpr (join_features.is_all_join) - { - if (added_columns.additional_filter_expression) - { - bool mark_per_row_used = join_features.right || join_features.full || mapv.size() > 1; - return joinRightColumnsWithAddtitionalFilter( - std::forward>(key_getter_vector), - mapv, - added_columns, - used_flags, - need_filter, - join_features.need_flags, - join_features.add_missing, - mark_per_row_used); - } - } - - if (added_columns.additional_filter_expression) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Additional filter expression is not supported for this JOIN"); - - return mapv.size() > 1 ? joinRightColumns( - std::forward>(key_getter_vector), mapv, added_columns, used_flags) - : joinRightColumns( - std::forward>(key_getter_vector), mapv, added_columns, used_flags); - } + JoinStuff::JoinUsedFlags & used_flags); /// Joins right table columns which indexes are present in right_indexes using specified map. /// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
@@ -392,464 +120,30 @@ private: std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, - JoinStuff::JoinUsedFlags & used_flags) - { - constexpr JoinFeatures join_features; - - size_t rows = added_columns.rows_to_add; - if constexpr (need_filter) - added_columns.filter = IColumn::Filter(rows, 0); - - Arena pool; - - if constexpr (join_features.need_replication) - added_columns.offsets_to_replicate = std::make_unique(rows); - - IColumn::Offset current_offset = 0; - size_t max_joined_block_rows = added_columns.max_joined_block_rows; - size_t i = 0; - for (; i < rows; ++i) - { - if constexpr (join_features.need_replication) - { - if (unlikely(current_offset >= max_joined_block_rows)) - { - added_columns.offsets_to_replicate->resize_assume_reserved(i); - added_columns.filter.resize_assume_reserved(i); - break; - } - } - - bool right_row_found = false; - - KnownRowsHolder known_rows; - for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx) - { - const auto & join_keys = added_columns.join_on_keys[onexpr_idx]; - if (join_keys.null_map && (*join_keys.null_map)[i]) - continue; - - bool row_acceptable = !join_keys.isRowFiltered(i); - using FindResult = typename KeyGetter::FindResult; - auto find_result = row_acceptable ? key_getter_vector[onexpr_idx].findKey(*(mapv[onexpr_idx]), i, pool) : FindResult(); - - if (find_result.isFound()) - { - right_row_found = true; - auto & mapped = find_result.getMapped(); - if constexpr (join_features.is_asof_join) - { - const IColumn & left_asof_key = added_columns.leftAsofKey(); - - auto row_ref = mapped->findAsof(left_asof_key, i); - if (row_ref.block) - { - setUsed(added_columns.filter, i); - if constexpr (flag_per_row) - used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); - else - used_flags.template setUsed(find_result); - - added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing); - } - else - addNotFoundRow(added_columns, current_offset); - } - else if constexpr (join_features.is_all_join) - { - setUsed(added_columns.filter, i); - used_flags.template setUsed(find_result); - auto used_flags_opt = join_features.need_flags ? &used_flags : nullptr; - addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); - } - else if constexpr ((join_features.is_any_join || join_features.is_semi_join) && join_features.right) - { - /// Use first appeared left key + it needs left columns replication - bool used_once = used_flags.template setUsedOnce(find_result); - if (used_once) - { - auto used_flags_opt = join_features.need_flags ? 
&used_flags : nullptr; - setUsed(added_columns.filter, i); - addFoundRowAll( - mapped, added_columns, current_offset, known_rows, used_flags_opt); - } - } - else if constexpr (join_features.is_any_join && KIND == JoinKind::Inner) - { - bool used_once = used_flags.template setUsedOnce(find_result); - - /// Use first appeared left key only - if (used_once) - { - setUsed(added_columns.filter, i); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); - } - - break; - } - else if constexpr (join_features.is_any_join && join_features.full) - { - /// TODO - } - else if constexpr (join_features.is_anti_join) - { - if constexpr (join_features.right && join_features.need_flags) - used_flags.template setUsed(find_result); - } - else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) - { - setUsed(added_columns.filter, i); - used_flags.template setUsed(find_result); - added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); - - if (join_features.is_any_or_semi_join) - { - break; - } - } - } - } - - if (!right_row_found) - { - if constexpr (join_features.is_anti_join && join_features.left) - setUsed(added_columns.filter, i); - addNotFoundRow(added_columns, current_offset); - } - - if constexpr (join_features.need_replication) - { - (*added_columns.offsets_to_replicate)[i] = current_offset; - } - } - - added_columns.applyLazyDefaults(); - return i; - } + JoinStuff::JoinUsedFlags & used_flags); template - static void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]) - { - if constexpr (need_filter) - filter[pos] = 1; - } + static void setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]); template static ColumnPtr buildAdditionalFilter( size_t left_start_row, const std::vector & selected_rows, const std::vector & row_replicate_offset, - AddedColumns & added_columns) - { - ColumnPtr result_column; - do - { - if (selected_rows.empty()) - { - result_column = ColumnUInt8::create(); - break; - } - const Block & sample_right_block = *selected_rows.begin()->block; - if (!sample_right_block || !added_columns.additional_filter_expression) - { - auto filter = ColumnUInt8::create(); - filter->insertMany(1, selected_rows.size()); - result_column = std::move(filter); - break; - } - - auto required_cols = added_columns.additional_filter_expression->getRequiredColumnsWithTypes(); - if (required_cols.empty()) - { - Block block; - added_columns.additional_filter_expression->execute(block); - result_column = block.getByPosition(0).column->cloneResized(selected_rows.size()); - break; - } - NameSet required_column_names; - for (auto & col : required_cols) - required_column_names.insert(col.name); - - Block executed_block; - size_t right_col_pos = 0; - for (const auto & col : sample_right_block.getColumnsWithTypeAndName()) - { - if (required_column_names.contains(col.name)) - { - auto new_col = col.column->cloneEmpty(); - for (const auto & selected_row : selected_rows) - { - const auto & src_col = selected_row.block->getByPosition(right_col_pos); - new_col->insertFrom(*src_col.column, selected_row.row_num); - } - executed_block.insert({std::move(new_col), col.type, col.name}); - } - right_col_pos += 1; - } - if (!executed_block) - { - result_column = ColumnUInt8::create(); - break; - } - - for (const auto & col_name : required_column_names) - { - const auto * src_col = added_columns.left_block.findByName(col_name); - if (!src_col) - continue; - auto new_col = src_col->column->cloneEmpty(); - size_t 
prev_left_offset = 0; - for (size_t i = 1; i < row_replicate_offset.size(); ++i) - { - const size_t & left_offset = row_replicate_offset[i]; - size_t rows = left_offset - prev_left_offset; - if (rows) - new_col->insertManyFrom(*src_col->column, left_start_row + i - 1, rows); - prev_left_offset = left_offset; - } - executed_block.insert({std::move(new_col), src_col->type, col_name}); - } - if (!executed_block) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "required columns: [{}], but not found any in left/right table. right table: {}, left table: {}", - required_cols.toString(), - sample_right_block.dumpNames(), - added_columns.left_block.dumpNames()); - } - - for (const auto & col : executed_block.getColumnsWithTypeAndName()) - if (!col.column || !col.type) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal nullptr column in input block: {}", executed_block.dumpStructure()); - - added_columns.additional_filter_expression->execute(executed_block); - result_column = executed_block.getByPosition(0).column->convertToFullColumnIfConst(); - executed_block.clear(); - } while (false); - - result_column = result_column->convertToFullIfNeeded(); - if (result_column->isNullable()) - { - /// Convert Nullable(UInt8) to UInt8 ensuring that nulls are zeros - /// Trying to avoid copying data, since we are the only owner of the column. - ColumnPtr mask_column = assert_cast(*result_column).getNullMapColumnPtr(); - - MutableColumnPtr mutable_column; - { - ColumnPtr nested_column = assert_cast(*result_column).getNestedColumnPtr(); - result_column.reset(); - mutable_column = IColumn::mutate(std::move(nested_column)); - } - - auto & column_data = assert_cast(*mutable_column).getData(); - const auto & mask_column_data = assert_cast(*mask_column).getData(); - for (size_t i = 0; i < column_data.size(); ++i) - { - if (mask_column_data[i]) - column_data[i] = 0; - } - return mutable_column; - } - return result_column; - } + AddedColumns & added_columns); /// First to collect all matched rows refs by join keys, then filter out rows which are not true in additional filter expression. 
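The declaration that follows, joinRightColumnsWithAddtitionalFilter (its body is moved out of this header into the new HashJoinMethodsImpl.h), implements the two-phase scheme the comment above describes: first gather every hash-table match for a batch of left rows, then evaluate the additional ON-clause filter over the gathered pairs and keep only the rows where it is true. A simplified, self-contained sketch of that idea, using a std::multimap and a std::function in place of the real hash maps and compiled filter expression (illustrative only, not the actual ClickHouse types):

#include <cstddef>
#include <functional>
#include <map>
#include <vector>

struct Match { size_t left_row; size_t right_row; };

/// Phase 1: collect every candidate pair that matches on the join key.
/// Phase 2: keep only the pairs for which the additional filter holds.
std::vector<Match> joinWithAdditionalFilter(
    const std::vector<int> & left_keys,
    const std::multimap<int, size_t> & right_index,
    const std::function<bool(size_t, size_t)> & additional_filter)
{
    std::vector<Match> candidates;
    for (size_t i = 0; i < left_keys.size(); ++i)
    {
        auto [begin, end] = right_index.equal_range(left_keys[i]);
        for (auto it = begin; it != end; ++it)
            candidates.push_back({i, it->second});
    }

    std::vector<Match> result;
    for (const auto & m : candidates)
        if (additional_filter(m.left_row, m.right_row))
            result.push_back(m);
    return result;
}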
- template + template static size_t joinRightColumnsWithAddtitionalFilter( std::vector && key_getter_vector, const std::vector & mapv, AddedColumns & added_columns, JoinStuff::JoinUsedFlags & used_flags [[maybe_unused]], bool need_filter [[maybe_unused]], - bool need_flags [[maybe_unused]], - bool add_missing [[maybe_unused]], - bool flag_per_row [[maybe_unused]]) - { - size_t left_block_rows = added_columns.rows_to_add; - if (need_filter) - added_columns.filter = IColumn::Filter(left_block_rows, 0); - - std::unique_ptr pool; - - if constexpr (need_replication) - added_columns.offsets_to_replicate = std::make_unique(left_block_rows); - - std::vector row_replicate_offset; - row_replicate_offset.reserve(left_block_rows); - - using FindResult = typename KeyGetter::FindResult; - size_t max_joined_block_rows = added_columns.max_joined_block_rows; - size_t left_row_iter = 0; - PreSelectedRows selected_rows; - selected_rows.reserve(left_block_rows); - std::vector find_results; - find_results.reserve(left_block_rows); - bool exceeded_max_block_rows = false; - IColumn::Offset total_added_rows = 0; - IColumn::Offset current_added_rows = 0; - - auto collect_keys_matched_rows_refs = [&]() - { - pool = std::make_unique(); - find_results.clear(); - row_replicate_offset.clear(); - row_replicate_offset.push_back(0); - current_added_rows = 0; - selected_rows.clear(); - for (; left_row_iter < left_block_rows; ++left_row_iter) - { - if constexpr (need_replication) - { - if (unlikely(total_added_rows + current_added_rows >= max_joined_block_rows)) - { - break; - } - } - KnownRowsHolder all_flag_known_rows; - KnownRowsHolder single_flag_know_rows; - for (size_t join_clause_idx = 0; join_clause_idx < added_columns.join_on_keys.size(); ++join_clause_idx) - { - const auto & join_keys = added_columns.join_on_keys[join_clause_idx]; - if (join_keys.null_map && (*join_keys.null_map)[left_row_iter]) - continue; - - bool row_acceptable = !join_keys.isRowFiltered(left_row_iter); - auto find_result = row_acceptable - ? key_getter_vector[join_clause_idx].findKey(*(mapv[join_clause_idx]), left_row_iter, *pool) - : FindResult(); - - if (find_result.isFound()) - { - auto & mapped = find_result.getMapped(); - find_results.push_back(find_result); - if (flag_per_row) - addFoundRowAll(mapped, selected_rows, current_added_rows, all_flag_known_rows, nullptr); - else - addFoundRowAll(mapped, selected_rows, current_added_rows, single_flag_know_rows, nullptr); - } - } - row_replicate_offset.push_back(current_added_rows); - } - }; - - auto copy_final_matched_rows = [&](size_t left_start_row, ColumnPtr filter_col) - { - const PaddedPODArray & filter_flags = assert_cast(*filter_col).getData(); - - size_t prev_replicated_row = 0; - auto selected_right_row_it = selected_rows.begin(); - size_t find_result_index = 0; - for (size_t i = 1, n = row_replicate_offset.size(); i < n; ++i) - { - bool any_matched = false; - /// For all right join, flag_per_row is true, we need mark used flags for each row. 
- if (flag_per_row) - { - for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) - { - if (filter_flags[replicated_row]) - { - any_matched = true; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, add_missing); - total_added_rows += 1; - if (need_flags) - used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); - } - ++selected_right_row_it; - } - } - else - { - for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) - { - if (filter_flags[replicated_row]) - { - any_matched = true; - added_columns.appendFromBlock(*selected_right_row_it->block, selected_right_row_it->row_num, add_missing); - total_added_rows += 1; - } - ++selected_right_row_it; - } - } - if (!any_matched) - { - if (add_missing) - addNotFoundRow(added_columns, total_added_rows); - else - addNotFoundRow(added_columns, total_added_rows); - } - else - { - if (!flag_per_row && need_flags) - used_flags.template setUsed(find_results[find_result_index]); - if (need_filter) - setUsed(added_columns.filter, left_start_row + i - 1); - if (add_missing) - added_columns.applyLazyDefaults(); - } - find_result_index += (prev_replicated_row != row_replicate_offset[i]); - - if constexpr (need_replication) - { - (*added_columns.offsets_to_replicate)[left_start_row + i - 1] = total_added_rows; - } - prev_replicated_row = row_replicate_offset[i]; - } - }; - - while (left_row_iter < left_block_rows && !exceeded_max_block_rows) - { - auto left_start_row = left_row_iter; - collect_keys_matched_rows_refs(); - if (selected_rows.size() != current_added_rows || row_replicate_offset.size() != left_row_iter - left_start_row + 1) - { - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "Sizes are mismatched. selected_rows.size:{}, current_added_rows:{}, row_replicate_offset.size:{}, left_row_iter: {}, " - "left_start_row: {}", - selected_rows.size(), - current_added_rows, - row_replicate_offset.size(), - left_row_iter, - left_start_row); - } - auto filter_col = buildAdditionalFilter(left_start_row, selected_rows, row_replicate_offset, added_columns); - copy_final_matched_rows(left_start_row, filter_col); - - if constexpr (need_replication) - { - // Add a check for current_added_rows to avoid run the filter expression on too small size batch. 
- if (total_added_rows >= max_joined_block_rows || current_added_rows < 1024) - exceeded_max_block_rows = true; - } - } - - if constexpr (need_replication) - { - added_columns.offsets_to_replicate->resize_assume_reserved(left_row_iter); - added_columns.filter.resize_assume_reserved(left_row_iter); - } - added_columns.applyLazyDefaults(); - return left_row_iter; - } + bool flag_per_row [[maybe_unused]]); /// Cut first num_rows rows from block in place and returns block with remaining rows - static Block sliceBlock(Block & block, size_t num_rows) - { - size_t total_rows = block.rows(); - if (num_rows >= total_rows) - return {}; - size_t remaining_rows = total_rows - num_rows; - Block remaining_block = block.cloneEmpty(); - for (size_t i = 0; i < block.columns(); ++i) - { - auto & col = block.getByPosition(i); - remaining_block.getByPosition(i).column = col.column->cut(num_rows, remaining_rows); - col.column = col.column->cut(0, num_rows); - } - return remaining_block; - } + static Block sliceBlock(Block & block, size_t num_rows); /** Since we do not store right key columns, * this function is used to copy left key columns to right key columns. @@ -864,70 +158,22 @@ private: const DataTypePtr & right_key_type, const String & renamed_right_column, const ColumnWithTypeAndName & left_column, - const IColumn::Filter * null_map_filter = nullptr) - { - ColumnWithTypeAndName right_column = left_column; - right_column.name = renamed_right_column; + const IColumn::Filter * null_map_filter = nullptr); - if (null_map_filter) - right_column.column = JoinCommon::filterWithBlanks(right_column.column, *null_map_filter); + static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable); - bool should_be_nullable = isNullableOrLowCardinalityNullable(right_key_type); - if (null_map_filter) - correctNullabilityInplace(right_column, should_be_nullable, *null_map_filter); - else - correctNullabilityInplace(right_column, should_be_nullable); - - if (!right_column.type->equals(*right_key_type)) - { - right_column.column = castColumnAccurate(right_column, right_key_type); - right_column.type = right_key_type; - } - - right_column.column = right_column.column->convertToFullColumnIfConst(); - return right_column; - } - - static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable) - { - if (nullable) - { - JoinCommon::convertColumnToNullable(column); - } - else - { - /// We have to replace values masked by NULLs with defaults. - if (column.column) - if (const auto * nullable_column = checkAndGetColumn(&*column.column)) - column.column = JoinCommon::filterWithBlanks(column.column, nullable_column->getNullMapColumn().getData(), true); - - JoinCommon::removeColumnNullability(column); - } - } - - static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) - { - if (nullable) - { - JoinCommon::convertColumnToNullable(column); - if (column.type->isNullable() && !negative_null_map.empty()) - { - MutableColumnPtr mutable_column = IColumn::mutate(std::move(column.column)); - assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); - column.column = std::move(mutable_column); - } - } - else - JoinCommon::removeColumnNullability(column); - } + static void correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map); }; /// Instantiate template class ahead in different .cpp files to avoid `too large translation unit`. 
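The extern template declarations that follow use the standard explicit-instantiation pattern this comment refers to: the header declares each specialization as instantiated elsewhere, and a single .cpp file provides the matching `template class` definition, so translation units that include the header do not re-instantiate the whole class. A minimal sketch of the pattern with hypothetical names (Joiner, joiner.h, joiner_int.cpp), not the real HashJoinMethods types:

// joiner.h
#pragma once

template <typename T>
class Joiner
{
public:
    /// Heavy template code; instantiating it in every including TU bloats compile times.
    T combine(T a, T b) const { return a + b; }
};

/// Promise to includers: Joiner<int> is instantiated in exactly one .cpp of this target.
extern template class Joiner<int>;

// joiner_int.cpp -- the one translation unit that actually emits code for Joiner<int>:
//     #include "joiner.h"
//     template class Joiner<int>;

This appears to be why the definitions were moved into the new HashJoinMethodsImpl.h, which the per-kind .cpp files include before providing their explicit instantiations.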
extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; @@ -939,6 +185,7 @@ extern template class HashJoinMethods; extern template class HashJoinMethods; +extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; extern template class HashJoinMethods; diff --git a/src/Interpreters/HashJoin/HashJoinMethodsImpl.h b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h new file mode 100644 index 00000000000..39ba9fc6e93 --- /dev/null +++ b/src/Interpreters/HashJoin/HashJoinMethodsImpl.h @@ -0,0 +1,936 @@ +#pragma once +#include + +namespace DB +{ +namespace ErrorCodes +{ +extern const int UNSUPPORTED_JOIN_KEYS; +extern const int LOGICAL_ERROR; +} +template +size_t HashJoinMethods::insertFromBlockImpl( + HashJoin & join, + HashJoin::Type type, + MapsTemplate & maps, + size_t rows, + const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, + Block * stored_block, + ConstNullMapPtr null_map, + UInt8ColumnDataPtr join_mask, + Arena & pool, + bool & is_inserted) +{ + switch (type) + { + case HashJoin::Type::EMPTY: + [[fallthrough]]; + case HashJoin::Type::CROSS: + /// Do nothing. We will only save block, and it is enough + is_inserted = true; + return 0; + +#define M(TYPE) \ + case HashJoin::Type::TYPE: \ + return insertFromBlockImplTypeCase< \ + typename KeyGetterForType>::Type>( \ + join, *maps.TYPE, rows, key_columns, key_sizes, stored_block, null_map, join_mask, pool, is_inserted); \ + break; + + APPLY_FOR_JOIN_VARIANTS(M) +#undef M + } +} + +template +Block HashJoinMethods::joinBlockImpl( + const HashJoin & join, Block & block, const Block & block_with_columns_to_add, const MapsTemplateVector & maps_, bool is_join_get) +{ + constexpr JoinFeatures join_features; + + std::vector join_on_keys; + const auto & onexprs = join.table_join->getClauses(); + for (size_t i = 0; i < onexprs.size(); ++i) + { + const auto & key_names = !is_join_get ? onexprs[i].key_names_left : onexprs[i].key_names_right; + join_on_keys.emplace_back(block, key_names, onexprs[i].condColumnNames().first, join.key_sizes[i]); + } + size_t existing_columns = block.columns(); + + /** If you use FULL or RIGHT JOIN, then the columns from the "left" table must be materialized. + * Because if they are constants, then in the "not joined" rows, they may have different values + * - default values, which can differ from the values of these constants. + */ + if constexpr (join_features.right || join_features.full) + { + materializeBlockInplace(block); + } + + /** For LEFT/INNER JOIN, the saved blocks do not contain keys. + * For FULL/RIGHT JOIN, the saved blocks contain keys; + * but they will not be used at this stage of joining (and will be in `AdderNonJoined`), and they need to be skipped. 
+ * For ASOF, the last column is used as the ASOF column + */ + AddedColumns added_columns( + block, + block_with_columns_to_add, + join.savedBlockSample(), + join, + std::move(join_on_keys), + join.table_join->getMixedJoinExpression(), + join_features.is_asof_join, + is_join_get); + + bool has_required_right_keys = (join.required_right_keys.columns() != 0); + added_columns.need_filter = join_features.need_filter || has_required_right_keys; + added_columns.max_joined_block_rows = join.max_joined_block_rows; + if (!added_columns.max_joined_block_rows) + added_columns.max_joined_block_rows = std::numeric_limits::max(); + else + added_columns.reserve(join_features.need_replication); + + size_t num_joined = switchJoinRightColumns(maps_, added_columns, join.data->type, *join.used_flags); + /// Do not hold memory for join_on_keys anymore + added_columns.join_on_keys.clear(); + Block remaining_block = sliceBlock(block, num_joined); + + added_columns.buildOutput(); + for (size_t i = 0; i < added_columns.size(); ++i) + block.insert(added_columns.moveColumn(i)); + + std::vector right_keys_to_replicate [[maybe_unused]]; + + if constexpr (join_features.need_filter) + { + /// If ANY INNER | RIGHT JOIN - filter all the columns except the new ones. + for (size_t i = 0; i < existing_columns; ++i) + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->filter(added_columns.filter, -1); + + /// Add join key columns from right block if needed using value from left table because of equality + for (size_t i = 0; i < join.required_right_keys.columns(); ++i) + { + const auto & right_key = join.required_right_keys.getByPosition(i); + /// asof column is already in block. + if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) + continue; + + const auto & left_column = block.getByName(join.required_right_keys_sources[i]); + const auto & right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); + auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column); + block.insert(std::move(right_col)); + } + } + else if (has_required_right_keys) + { + /// Add join key columns from right block if needed. + for (size_t i = 0; i < join.required_right_keys.columns(); ++i) + { + const auto & right_key = join.required_right_keys.getByPosition(i); + auto right_col_name = join.getTableJoin().renamedRightColumnName(right_key.name); + /// asof column is already in block. + if (join_features.is_asof_join && right_key.name == join.table_join->getOnlyClause().key_names_right.back()) + continue; + + const auto & left_column = block.getByName(join.required_right_keys_sources[i]); + auto right_col = copyLeftKeyColumnToRight(right_key.type, right_col_name, left_column, &added_columns.filter); + block.insert(std::move(right_col)); + + if constexpr (join_features.need_replication) + right_keys_to_replicate.push_back(block.getPositionByName(right_col_name)); + } + } + + if constexpr (join_features.need_replication) + { + std::unique_ptr & offsets_to_replicate = added_columns.offsets_to_replicate; + + /// If ALL ... JOIN - we replicate all the columns except the new ones. 
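+ /// Illustrative note: offsets_to_replicate stores cumulative counts, so with offsets [2, 2, 5] left row 0 is repeated twice,
+ /// row 1 zero times and row 2 three times by IColumn::replicate, which keeps every left column aligned with the rows
+ /// appended from the right table.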
+ for (size_t i = 0; i < existing_columns; ++i) + { + block.safeGetByPosition(i).column = block.safeGetByPosition(i).column->replicate(*offsets_to_replicate); + } + + /// Replicate additional right keys + for (size_t pos : right_keys_to_replicate) + { + block.safeGetByPosition(pos).column = block.safeGetByPosition(pos).column->replicate(*offsets_to_replicate); + } + } + return remaining_block; +} + +template +template +KeyGetter HashJoinMethods::createKeyGetter(const ColumnRawPtrs & key_columns, const Sizes & key_sizes) +{ + if constexpr (is_asof_join) + { + auto key_column_copy = key_columns; + auto key_size_copy = key_sizes; + key_column_copy.pop_back(); + key_size_copy.pop_back(); + return KeyGetter(key_column_copy, key_size_copy, nullptr); + } + else + return KeyGetter(key_columns, key_sizes, nullptr); +} + +template +template +size_t HashJoinMethods::insertFromBlockImplTypeCase( + HashJoin & join, + HashMap & map, + size_t rows, + const ColumnRawPtrs & key_columns, + const Sizes & key_sizes, + Block * stored_block, + ConstNullMapPtr null_map, + UInt8ColumnDataPtr join_mask, + Arena & pool, + bool & is_inserted) +{ + [[maybe_unused]] constexpr bool mapped_one = std::is_same_v; + constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; + + const IColumn * asof_column [[maybe_unused]] = nullptr; + if constexpr (is_asof_join) + asof_column = key_columns.back(); + + auto key_getter = createKeyGetter(key_columns, key_sizes); + + /// For ALL and ASOF join always insert values + is_inserted = !mapped_one || is_asof_join; + + for (size_t i = 0; i < rows; ++i) + { + if (null_map && (*null_map)[i]) + { + /// nulls are not inserted into hash table, + /// keep them for RIGHT and FULL joins + is_inserted = true; + continue; + } + + /// Check condition for right table from ON section + if (join_mask && !(*join_mask)[i]) + continue; + + if constexpr (is_asof_join) + Inserter::insertAsof(join, map, key_getter, stored_block, i, pool, *asof_column); + else if constexpr (mapped_one) + is_inserted |= Inserter::insertOne(join, map, key_getter, stored_block, i, pool); + else + Inserter::insertAll(join, map, key_getter, stored_block, i, pool); + } + return map.getBufferSizeInCells(); +} + +template +template +size_t HashJoinMethods::switchJoinRightColumns( + const std::vector & mapv, + AddedColumns & added_columns, + HashJoin::Type type, + JoinStuff::JoinUsedFlags & used_flags) +{ + constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; + switch (type) + { + case HashJoin::Type::EMPTY: { + if constexpr (!is_asof_join) + { + using KeyGetter = KeyGetterEmpty; + std::vector key_getter_vector; + key_getter_vector.emplace_back(); + + using MapTypeVal = typename KeyGetter::MappedType; + std::vector a_map_type_vector; + a_map_type_vector.emplace_back(); + return joinRightColumnsSwitchNullability( + std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); + } + throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys. 
Type: {}", type); + } +#define M(TYPE) \ + case HashJoin::Type::TYPE: { \ + using MapTypeVal = const typename std::remove_reference_t::element_type; \ + using KeyGetter = typename KeyGetterForType::Type; \ + std::vector a_map_type_vector(mapv.size()); \ + std::vector key_getter_vector; \ + for (size_t d = 0; d < added_columns.join_on_keys.size(); ++d) \ + { \ + const auto & join_on_key = added_columns.join_on_keys[d]; \ + a_map_type_vector[d] = mapv[d]->TYPE.get(); \ + key_getter_vector.push_back( \ + std::move(createKeyGetter(join_on_key.key_columns, join_on_key.key_sizes))); \ + } \ + return joinRightColumnsSwitchNullability(std::move(key_getter_vector), a_map_type_vector, added_columns, used_flags); \ + } + APPLY_FOR_JOIN_VARIANTS(M) +#undef M + + default: + throw Exception(ErrorCodes::UNSUPPORTED_JOIN_KEYS, "Unsupported JOIN keys (type: {})", type); + } +} + +template +template +size_t HashJoinMethods::joinRightColumnsSwitchNullability( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags) +{ + if (added_columns.need_filter) + { + return joinRightColumnsSwitchMultipleDisjuncts( + std::forward>(key_getter_vector), mapv, added_columns, used_flags); + } + else + { + return joinRightColumnsSwitchMultipleDisjuncts( + std::forward>(key_getter_vector), mapv, added_columns, used_flags); + } +} + +template +template +size_t HashJoinMethods::joinRightColumnsSwitchMultipleDisjuncts( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags) +{ + constexpr JoinFeatures join_features; + if constexpr (join_features.is_maps_all) + { + if (added_columns.additional_filter_expression) + { + bool mark_per_row_used = join_features.right || join_features.full || mapv.size() > 1; + return joinRightColumnsWithAddtitionalFilter( + std::forward>(key_getter_vector), mapv, added_columns, used_flags, need_filter, mark_per_row_used); + } + } + + if (added_columns.additional_filter_expression) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Additional filter expression is not supported for this JOIN"); + + return mapv.size() > 1 ? joinRightColumns( + std::forward>(key_getter_vector), mapv, added_columns, used_flags) + : joinRightColumns( + std::forward>(key_getter_vector), mapv, added_columns, used_flags); +} + + +/// Joins right table columns which indexes are present in right_indexes using specified map. +/// Makes filter (1 if row presented in right table) and returns offsets to replicate (for ALL JOINS). 
+template +template +size_t HashJoinMethods::joinRightColumns( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags) +{ + constexpr JoinFeatures join_features; + + size_t rows = added_columns.rows_to_add; + if constexpr (need_filter) + added_columns.filter = IColumn::Filter(rows, 0); + + Arena pool; + + if constexpr (join_features.need_replication) + added_columns.offsets_to_replicate = std::make_unique(rows); + + IColumn::Offset current_offset = 0; + size_t max_joined_block_rows = added_columns.max_joined_block_rows; + size_t i = 0; + for (; i < rows; ++i) + { + if constexpr (join_features.need_replication) + { + if (unlikely(current_offset >= max_joined_block_rows)) + { + added_columns.offsets_to_replicate->resize(i); + added_columns.filter.resize(i); + break; + } + } + + bool right_row_found = false; + KnownRowsHolder known_rows; + for (size_t onexpr_idx = 0; onexpr_idx < added_columns.join_on_keys.size(); ++onexpr_idx) + { + const auto & join_keys = added_columns.join_on_keys[onexpr_idx]; + if (join_keys.null_map && (*join_keys.null_map)[i]) + continue; + + bool row_acceptable = !join_keys.isRowFiltered(i); + using FindResult = typename KeyGetter::FindResult; + auto find_result = row_acceptable ? key_getter_vector[onexpr_idx].findKey(*(mapv[onexpr_idx]), i, pool) : FindResult(); + + if (find_result.isFound()) + { + right_row_found = true; + auto & mapped = find_result.getMapped(); + if constexpr (join_features.is_asof_join) + { + const IColumn & left_asof_key = added_columns.leftAsofKey(); + + auto row_ref = mapped->findAsof(left_asof_key, i); + if (row_ref.block) + { + setUsed(added_columns.filter, i); + if constexpr (flag_per_row) + used_flags.template setUsed(row_ref.block, row_ref.row_num, 0); + else + used_flags.template setUsed(find_result); + + added_columns.appendFromBlock(*row_ref.block, row_ref.row_num, join_features.add_missing); + } + else + addNotFoundRow(added_columns, current_offset); + } + else if constexpr (join_features.is_all_join) + { + setUsed(added_columns.filter, i); + used_flags.template setUsed(find_result); + auto used_flags_opt = join_features.need_flags ? &used_flags : nullptr; + addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); + } + else if constexpr ((join_features.is_any_join || join_features.is_semi_join) && join_features.right) + { + /// Use first appeared left key + it needs left columns replication + bool used_once = used_flags.template setUsedOnce(find_result); + if (used_once) + { + auto used_flags_opt = join_features.need_flags ? 
&used_flags : nullptr; + setUsed(added_columns.filter, i); + addFoundRowAll(mapped, added_columns, current_offset, known_rows, used_flags_opt); + } + } + else if constexpr (join_features.is_any_join && join_features.inner) + { + bool used_once = used_flags.template setUsedOnce(find_result); + + /// Use first appeared left key only + if (used_once) + { + setUsed(added_columns.filter, i); + added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + } + + break; + } + else if constexpr (join_features.is_any_join && join_features.full) + { + /// TODO + } + else if constexpr (join_features.is_anti_join) + { + if constexpr (join_features.right && join_features.need_flags) + used_flags.template setUsed(find_result); + } + else /// ANY LEFT, SEMI LEFT, old ANY (RightAny) + { + setUsed(added_columns.filter, i); + used_flags.template setUsed(find_result); + added_columns.appendFromBlock(*mapped.block, mapped.row_num, join_features.add_missing); + + if (join_features.is_any_or_semi_join) + { + break; + } + } + } + } + + if (!right_row_found) + { + if constexpr (join_features.is_anti_join && join_features.left) + setUsed(added_columns.filter, i); + addNotFoundRow(added_columns, current_offset); + } + + if constexpr (join_features.need_replication) + { + (*added_columns.offsets_to_replicate)[i] = current_offset; + } + } + + added_columns.applyLazyDefaults(); + return i; +} + +template +template +void HashJoinMethods::setUsed(IColumn::Filter & filter [[maybe_unused]], size_t pos [[maybe_unused]]) +{ + if constexpr (need_filter) + filter[pos] = 1; +} + +template +template +ColumnPtr HashJoinMethods::buildAdditionalFilter( + size_t left_start_row, + const std::vector & selected_rows, + const std::vector & row_replicate_offset, + AddedColumns & added_columns) +{ + ColumnPtr result_column; + do + { + if (selected_rows.empty()) + { + result_column = ColumnUInt8::create(); + break; + } + const Block & sample_right_block = *selected_rows.begin()->block; + if (!sample_right_block || !added_columns.additional_filter_expression) + { + auto filter = ColumnUInt8::create(); + filter->insertMany(1, selected_rows.size()); + result_column = std::move(filter); + break; + } + + auto required_cols = added_columns.additional_filter_expression->getRequiredColumnsWithTypes(); + if (required_cols.empty()) + { + Block block; + added_columns.additional_filter_expression->execute(block); + result_column = block.getByPosition(0).column->cloneResized(selected_rows.size()); + break; + } + NameSet required_column_names; + for (auto & col : required_cols) + required_column_names.insert(col.name); + + Block executed_block; + size_t right_col_pos = 0; + for (const auto & col : sample_right_block.getColumnsWithTypeAndName()) + { + if (required_column_names.contains(col.name)) + { + auto new_col = col.column->cloneEmpty(); + for (const auto & selected_row : selected_rows) + { + const auto & src_col = selected_row.block->getByPosition(right_col_pos); + new_col->insertFrom(*src_col.column, selected_row.row_num); + } + executed_block.insert({std::move(new_col), col.type, col.name}); + } + right_col_pos += 1; + } + if (!executed_block) + { + result_column = ColumnUInt8::create(); + break; + } + + for (const auto & col_name : required_column_names) + { + const auto * src_col = added_columns.left_block.findByName(col_name); + if (!src_col) + continue; + auto new_col = src_col->column->cloneEmpty(); + size_t prev_left_offset = 0; + for (size_t i = 1; i < row_replicate_offset.size(); ++i) + { + const size_t & 
left_offset = row_replicate_offset[i]; + size_t rows = left_offset - prev_left_offset; + if (rows) + new_col->insertManyFrom(*src_col->column, left_start_row + i - 1, rows); + prev_left_offset = left_offset; + } + executed_block.insert({std::move(new_col), src_col->type, col_name}); + } + if (!executed_block) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "required columns: [{}], but not found any in left/right table. right table: {}, left table: {}", + required_cols.toString(), + sample_right_block.dumpNames(), + added_columns.left_block.dumpNames()); + } + + for (const auto & col : executed_block.getColumnsWithTypeAndName()) + if (!col.column || !col.type) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Illegal nullptr column in input block: {}", executed_block.dumpStructure()); + + added_columns.additional_filter_expression->execute(executed_block); + result_column = executed_block.getByPosition(0).column->convertToFullColumnIfConst(); + executed_block.clear(); + } while (false); + + result_column = result_column->convertToFullIfNeeded(); + if (result_column->isNullable()) + { + /// Convert Nullable(UInt8) to UInt8 ensuring that nulls are zeros + /// Trying to avoid copying data, since we are the only owner of the column. + ColumnPtr mask_column = assert_cast(*result_column).getNullMapColumnPtr(); + + MutableColumnPtr mutable_column; + { + ColumnPtr nested_column = assert_cast(*result_column).getNestedColumnPtr(); + result_column.reset(); + mutable_column = IColumn::mutate(std::move(nested_column)); + } + + auto & column_data = assert_cast(*mutable_column).getData(); + const auto & mask_column_data = assert_cast(*mask_column).getData(); + for (size_t i = 0; i < column_data.size(); ++i) + { + if (mask_column_data[i]) + column_data[i] = 0; + } + return mutable_column; + } + return result_column; +} + +template +template +size_t HashJoinMethods::joinRightColumnsWithAddtitionalFilter( + std::vector && key_getter_vector, + const std::vector & mapv, + AddedColumns & added_columns, + JoinStuff::JoinUsedFlags & used_flags [[maybe_unused]], + bool need_filter [[maybe_unused]], + bool flag_per_row [[maybe_unused]]) +{ + constexpr JoinFeatures join_features; + size_t left_block_rows = added_columns.rows_to_add; + if (need_filter) + added_columns.filter = IColumn::Filter(left_block_rows, 0); + + std::unique_ptr pool; + + if constexpr (join_features.need_replication) + added_columns.offsets_to_replicate = std::make_unique(left_block_rows); + + std::vector row_replicate_offset; + row_replicate_offset.reserve(left_block_rows); + + using FindResult = typename KeyGetter::FindResult; + size_t max_joined_block_rows = added_columns.max_joined_block_rows; + size_t left_row_iter = 0; + PreSelectedRows selected_rows; + selected_rows.reserve(left_block_rows); + std::vector find_results; + find_results.reserve(left_block_rows); + bool exceeded_max_block_rows = false; + IColumn::Offset total_added_rows = 0; + IColumn::Offset current_added_rows = 0; + + auto collect_keys_matched_rows_refs = [&]() + { + pool = std::make_unique(); + find_results.clear(); + row_replicate_offset.clear(); + row_replicate_offset.push_back(0); + current_added_rows = 0; + selected_rows.clear(); + for (; left_row_iter < left_block_rows; ++left_row_iter) + { + if constexpr (join_features.need_replication) + { + if (unlikely(total_added_rows + current_added_rows >= max_joined_block_rows)) + { + break; + } + } + KnownRowsHolder all_flag_known_rows; + KnownRowsHolder single_flag_know_rows; + for (size_t join_clause_idx = 0; 
join_clause_idx < added_columns.join_on_keys.size(); ++join_clause_idx) + { + const auto & join_keys = added_columns.join_on_keys[join_clause_idx]; + if (join_keys.null_map && (*join_keys.null_map)[left_row_iter]) + continue; + + bool row_acceptable = !join_keys.isRowFiltered(left_row_iter); + auto find_result = row_acceptable + ? key_getter_vector[join_clause_idx].findKey(*(mapv[join_clause_idx]), left_row_iter, *pool) + : FindResult(); + + if (find_result.isFound()) + { + auto & mapped = find_result.getMapped(); + find_results.push_back(find_result); + /// We don't add missing rows in addFoundRowAll here. We will add them after the filter is applied. + /// It's different from `joinRightColumns`. + if (flag_per_row) + addFoundRowAll(mapped, selected_rows, current_added_rows, all_flag_known_rows, nullptr); + else + addFoundRowAll(mapped, selected_rows, current_added_rows, single_flag_know_rows, nullptr); + } + } + row_replicate_offset.push_back(current_added_rows); + } + }; + + auto copy_final_matched_rows = [&](size_t left_start_row, ColumnPtr filter_col) + { + const PaddedPODArray & filter_flags = assert_cast(*filter_col).getData(); + + size_t prev_replicated_row = 0; + auto selected_right_row_it = selected_rows.begin(); + size_t find_result_index = 0; + for (size_t i = 1, n = row_replicate_offset.size(); i < n; ++i) + { + bool any_matched = false; + /// For right/full join or multiple disjuncts, we need to mark used flags for each row. + if (flag_per_row) + { + for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) + { + if (filter_flags[replicated_row]) + { + if constexpr (join_features.is_semi_join || join_features.is_any_join) + { + /// For LEFT/INNER SEMI/ANY JOIN, we need to add only the first appeared row from the left, + if constexpr (join_features.left || join_features.inner) + { + if (!any_matched) + { + // For inner join, we need to mark each right row's flag, because we only use each right row once.
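+ // setUsedOnce is an atomic compare-and-swap on that flag, so it returns true for exactly one caller per right row;
+ // later probes of the same row see the flag already set and skip it, preserving the at-most-one-match semantics.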
+ auto used_once = used_flags.template setUsedOnce( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + if (used_once) + { + any_matched = true; + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + } + } + } + else + { + auto used_once = used_flags.template setUsedOnce( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + if (used_once) + { + any_matched = true; + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + } + } + } + else if constexpr (join_features.is_anti_join) + { + any_matched = true; + if constexpr (join_features.right && join_features.need_flags) + used_flags.template setUsed(selected_right_row_it->block, selected_right_row_it->row_num, 0); + } + else + { + any_matched = true; + total_added_rows += 1; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + used_flags.template setUsed( + selected_right_row_it->block, selected_right_row_it->row_num, 0); + } + } + + ++selected_right_row_it; + } + } + else + { + for (size_t replicated_row = prev_replicated_row; replicated_row < row_replicate_offset[i]; ++replicated_row) + { + if constexpr (join_features.is_anti_join) + { + any_matched |= filter_flags[replicated_row]; + } + else if constexpr (join_features.need_replication) + { + if (filter_flags[replicated_row]) + { + any_matched = true; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + total_added_rows += 1; + } + ++selected_right_row_it; + } + else + { + if (filter_flags[replicated_row]) + { + any_matched = true; + added_columns.appendFromBlock( + *selected_right_row_it->block, selected_right_row_it->row_num, join_features.add_missing); + total_added_rows += 1; + selected_right_row_it = selected_right_row_it + row_replicate_offset[i] - replicated_row; + break; + } + else + ++selected_right_row_it; + } + } + } + + + if constexpr (join_features.is_anti_join) + { + if (!any_matched) + { + if constexpr (join_features.left) + if (need_filter) + setUsed(added_columns.filter, left_start_row + i - 1); + addNotFoundRow(added_columns, total_added_rows); + } + } + else + { + if (!any_matched) + { + addNotFoundRow(added_columns, total_added_rows); + } + else + { + if (!flag_per_row) + used_flags.template setUsed(find_results[find_result_index]); + if (need_filter) + setUsed(added_columns.filter, left_start_row + i - 1); + if constexpr (join_features.add_missing) + added_columns.applyLazyDefaults(); + } + } + find_result_index += (prev_replicated_row != row_replicate_offset[i]); + + if constexpr (join_features.need_replication) + { + (*added_columns.offsets_to_replicate)[left_start_row + i - 1] = total_added_rows; + } + prev_replicated_row = row_replicate_offset[i]; + } + }; + + while (left_row_iter < left_block_rows && !exceeded_max_block_rows) + { + auto left_start_row = left_row_iter; + collect_keys_matched_rows_refs(); + if (selected_rows.size() != current_added_rows || row_replicate_offset.size() != left_row_iter - left_start_row + 1) + { + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "Sizes are mismatched. 
selected_rows.size:{}, current_added_rows:{}, row_replicate_offset.size:{}, left_row_iter: {}, " + "left_start_row: {}", + selected_rows.size(), + current_added_rows, + row_replicate_offset.size(), + left_row_iter, + left_start_row); + } + auto filter_col = buildAdditionalFilter(left_start_row, selected_rows, row_replicate_offset, added_columns); + copy_final_matched_rows(left_start_row, filter_col); + + if constexpr (join_features.need_replication) + { + // Add a check for current_added_rows to avoid run the filter expression on too small size batch. + if (total_added_rows >= max_joined_block_rows || current_added_rows < 1024) + exceeded_max_block_rows = true; + } + } + + if constexpr (join_features.need_replication) + { + added_columns.offsets_to_replicate->resize_assume_reserved(left_row_iter); + added_columns.filter.resize_assume_reserved(left_row_iter); + } + added_columns.applyLazyDefaults(); + return left_row_iter; +} + +template +Block HashJoinMethods::sliceBlock(Block & block, size_t num_rows) +{ + size_t total_rows = block.rows(); + if (num_rows >= total_rows) + return {}; + size_t remaining_rows = total_rows - num_rows; + Block remaining_block = block.cloneEmpty(); + for (size_t i = 0; i < block.columns(); ++i) + { + auto & col = block.getByPosition(i); + remaining_block.getByPosition(i).column = col.column->cut(num_rows, remaining_rows); + col.column = col.column->cut(0, num_rows); + } + return remaining_block; +} + +template +ColumnWithTypeAndName HashJoinMethods::copyLeftKeyColumnToRight( + const DataTypePtr & right_key_type, + const String & renamed_right_column, + const ColumnWithTypeAndName & left_column, + const IColumn::Filter * null_map_filter) +{ + ColumnWithTypeAndName right_column = left_column; + right_column.name = renamed_right_column; + + if (null_map_filter) + right_column.column = JoinCommon::filterWithBlanks(right_column.column, *null_map_filter); + + bool should_be_nullable = isNullableOrLowCardinalityNullable(right_key_type); + if (null_map_filter) + correctNullabilityInplace(right_column, should_be_nullable, *null_map_filter); + else + correctNullabilityInplace(right_column, should_be_nullable); + + if (!right_column.type->equals(*right_key_type)) + { + right_column.column = castColumnAccurate(right_column, right_key_type); + right_column.type = right_key_type; + } + + right_column.column = right_column.column->convertToFullColumnIfConst(); + return right_column; +} + +template +void HashJoinMethods::correctNullabilityInplace(ColumnWithTypeAndName & column, bool nullable) +{ + if (nullable) + { + JoinCommon::convertColumnToNullable(column); + } + else + { + /// We have to replace values masked by NULLs with defaults. 
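+ /// Note: filterWithBlanks keeps the column size and only overwrites the rows flagged in the null map with default values,
+ /// so the column can then be turned into a plain non-nullable column.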
+ if (column.column) + if (const auto * nullable_column = checkAndGetColumn(&*column.column)) + column.column = JoinCommon::filterWithBlanks(column.column, nullable_column->getNullMapColumn().getData(), true); + + JoinCommon::removeColumnNullability(column); + } +} + +template +void HashJoinMethods::correctNullabilityInplace( + ColumnWithTypeAndName & column, bool nullable, const IColumn::Filter & negative_null_map) +{ + if (nullable) + { + JoinCommon::convertColumnToNullable(column); + if (column.type->isNullable() && !negative_null_map.empty()) + { + MutableColumnPtr mutable_column = IColumn::mutate(std::move(column.column)); + assert_cast(*mutable_column).applyNegatedNullMap(negative_null_map); + column.column = std::move(mutable_column); + } + } + else + JoinCommon::removeColumnNullability(column); +} +} diff --git a/src/Interpreters/HashJoin/InnerHashJoin.cpp b/src/Interpreters/HashJoin/InnerHashJoin.cpp index 85aedf3a8e5..69f4c620cb8 100644 --- a/src/Interpreters/HashJoin/InnerHashJoin.cpp +++ b/src/Interpreters/HashJoin/InnerHashJoin.cpp @@ -1,10 +1,11 @@ -#include +#include namespace DB { template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; template class HashJoinMethods; template class HashJoinMethods; diff --git a/src/Interpreters/HashJoin/JoinFeatures.h b/src/Interpreters/HashJoin/JoinFeatures.h index 2f2bd1e29a2..b39593e7cac 100644 --- a/src/Interpreters/HashJoin/JoinFeatures.h +++ b/src/Interpreters/HashJoin/JoinFeatures.h @@ -3,26 +3,41 @@ #include namespace DB { -template +template struct JoinFeatures { static constexpr bool is_any_join = STRICTNESS == JoinStrictness::Any; - static constexpr bool is_any_or_semi_join = STRICTNESS == JoinStrictness::Any || STRICTNESS == JoinStrictness::RightAny || (STRICTNESS == JoinStrictness::Semi && KIND == JoinKind::Left); static constexpr bool is_all_join = STRICTNESS == JoinStrictness::All; static constexpr bool is_asof_join = STRICTNESS == JoinStrictness::Asof; static constexpr bool is_semi_join = STRICTNESS == JoinStrictness::Semi; static constexpr bool is_anti_join = STRICTNESS == JoinStrictness::Anti; + static constexpr bool is_any_or_semi_join = is_any_join || STRICTNESS == JoinStrictness::RightAny || (is_semi_join && KIND == JoinKind::Left); static constexpr bool left = KIND == JoinKind::Left; static constexpr bool right = KIND == JoinKind::Right; static constexpr bool inner = KIND == JoinKind::Inner; static constexpr bool full = KIND == JoinKind::Full; + /** Whether we may need duplicate rows from the left table. + * For example, when we have row (key1, attr1) in left table + * and rows (key1, attr2), (key1, attr3) in right table, + * then we need to duplicate row (key1, attr1) for each of joined rows from right table, so result will be + * (key1, attr1, key1, attr2) + * (key1, attr1, key1, attr3) + */ static constexpr bool need_replication = is_all_join || (is_any_join && right) || (is_semi_join && right); + + /// Whether we need to filter rows from the left table that do not have matches in the right table. static constexpr bool need_filter = !need_replication && (inner || right || (is_semi_join && left) || (is_anti_join && left)); + + /// Whether we need to add default values for columns from the left table. 
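+ /// For example: ALL LEFT JOIN has need_replication=true, need_filter=false, add_missing=true (left rows without a match
+ /// are kept and their right-side columns are filled with defaults), while ANY INNER JOIN has need_replication=false,
+ /// need_filter=true, add_missing=false.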
static constexpr bool add_missing = (left || full) && !is_semi_join; - static constexpr bool need_flags = MapGetter::flagged; + /// Whether we need to store flags for rows from the right table table + /// that indicates if they have matches in the left table. + static constexpr bool need_flags = MapGetter, HashJoin::MapsAll>>::flagged; + + static constexpr bool is_maps_all = std::is_same_v, HashJoin::MapsAll>; }; } diff --git a/src/Interpreters/HashJoin/JoinUsedFlags.h b/src/Interpreters/HashJoin/JoinUsedFlags.h index bd41ba2073f..c84c6ec3fea 100644 --- a/src/Interpreters/HashJoin/JoinUsedFlags.h +++ b/src/Interpreters/HashJoin/JoinUsedFlags.h @@ -26,10 +26,10 @@ public: /// Update size for vector with flags. /// Calling this method invalidates existing flags. /// It can be called several times, but all of them should happen before using this structure. - template + template void reinit(size_t size) { - if constexpr (MapGetter::flagged) + if constexpr (MapGetter::flagged) { assert(flags[nullptr].size() <= size); need_flags = true; @@ -43,10 +43,10 @@ public: } } - template + template void reinit(const Block * block_ptr) { - if constexpr (MapGetter::flagged) + if constexpr (MapGetter::flagged) { assert(flags[block_ptr].size() <= block_ptr->rows()); need_flags = true; @@ -148,6 +148,31 @@ public: } } + template + bool setUsedOnce(const Block * block, size_t row_num, size_t offset) + { + if constexpr (!use_flags) + return true; + + if constexpr (flag_per_row) + { + /// fast check to prevent heavy CAS with seq_cst order + if (flags[block][row_num].load(std::memory_order_relaxed)) + return false; + + bool expected = false; + return flags[block][row_num].compare_exchange_strong(expected, true); + } + else + { + /// fast check to prevent heavy CAS with seq_cst order + if (flags[nullptr][offset].load(std::memory_order_relaxed)) + return false; + + bool expected = false; + return flags[nullptr][offset].compare_exchange_strong(expected, true); + } + } }; } diff --git a/src/Interpreters/HashJoin/LeftHashJoin.cpp b/src/Interpreters/HashJoin/LeftHashJoin.cpp index 69e17ff70bd..4e06789570e 100644 --- a/src/Interpreters/HashJoin/LeftHashJoin.cpp +++ b/src/Interpreters/HashJoin/LeftHashJoin.cpp @@ -1,11 +1,14 @@ -#include +#include namespace DB { template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; +template class HashJoinMethods; template class HashJoinMethods; } diff --git a/src/Interpreters/HashJoin/RightHashJoin.cpp b/src/Interpreters/HashJoin/RightHashJoin.cpp index 8e304754f5c..d9d41d7d63c 100644 --- a/src/Interpreters/HashJoin/RightHashJoin.cpp +++ b/src/Interpreters/HashJoin/RightHashJoin.cpp @@ -1,4 +1,4 @@ -#include +#include namespace DB { diff --git a/src/Interpreters/InterpreterCreateQuery.cpp b/src/Interpreters/InterpreterCreateQuery.cpp index a101e5e8f09..95143031707 100644 --- a/src/Interpreters/InterpreterCreateQuery.cpp +++ b/src/Interpreters/InterpreterCreateQuery.cpp @@ -690,7 +690,7 @@ ColumnsDescription InterpreterCreateQuery::getColumnsDescription( throw Exception(ErrorCodes::LOGICAL_ERROR, "Neither default value expression nor type is provided for a column"); if (col_decl.comment) - column.comment = col_decl.comment->as().value.get(); + column.comment = col_decl.comment->as().value.safeGet(); if (col_decl.codec) { @@ -787,10 +787,8 @@ InterpreterCreateQuery::TableProperties 
InterpreterCreateQuery::getTableProperti if (index_desc.type == INVERTED_INDEX_NAME && !settings.allow_experimental_inverted_index) throw Exception(ErrorCodes::ILLEGAL_INDEX, "Please use index type 'full_text' instead of 'inverted'"); /// ---- - if (index_desc.type == "annoy" && !settings.allow_experimental_annoy_index) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Annoy index is disabled. Turn on allow_experimental_annoy_index"); - if (index_desc.type == "usearch" && !settings.allow_experimental_usearch_index) - throw Exception(ErrorCodes::INCORRECT_QUERY, "USearch index is disabled. Turn on allow_experimental_usearch_index"); + if (index_desc.type == "vector_similarity" && !settings.allow_experimental_vector_similarity_index) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index is disabled. Turn on allow_experimental_vector_similarity_index"); properties.indices.push_back(index_desc); } @@ -1916,7 +1914,7 @@ void InterpreterCreateQuery::prepareOnClusterQuery(ASTCreateQuery & create, Cont if (has_explicit_zk_path_arg) { - String zk_path = create.storage->engine->arguments->children[0]->as()->value.get(); + String zk_path = create.storage->engine->arguments->children[0]->as()->value.safeGet(); Macros::MacroExpansionInfo info; info.table_id.uuid = create.uuid; info.ignore_unknown = true; diff --git a/src/Interpreters/InterpreterDeleteQuery.cpp b/src/Interpreters/InterpreterDeleteQuery.cpp index 291c8e19db0..4827edc6c2a 100644 --- a/src/Interpreters/InterpreterDeleteQuery.cpp +++ b/src/Interpreters/InterpreterDeleteQuery.cpp @@ -17,6 +17,7 @@ #include #include #include +#include namespace DB @@ -27,7 +28,6 @@ namespace ErrorCodes extern const int TABLE_IS_READ_ONLY; extern const int SUPPORT_IS_DISABLED; extern const int BAD_ARGUMENTS; - extern const int NOT_IMPLEMENTED; extern const int QUERY_IS_PROHIBITED; } @@ -67,13 +67,42 @@ BlockIO InterpreterDeleteQuery::execute() auto table_lock = table->lockForShare(getContext()->getCurrentQueryId(), getContext()->getSettingsRef().lock_acquire_timeout); auto metadata_snapshot = table->getInMemoryMetadataPtr(); - auto lightweightDelete = [&]() + if (table->supportsDelete()) + { + /// Convert to MutationCommand + MutationCommands mutation_commands; + MutationCommand mut_command; + + mut_command.type = MutationCommand::Type::DELETE; + mut_command.predicate = delete_query.predicate; + + mutation_commands.emplace_back(mut_command); + + table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); + MutationsInterpreter::Settings settings(false); + MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate(); + table->mutate(mutation_commands, getContext()); + return {}; + } + else if (table->supportsLightweightDelete()) { if (!getContext()->getSettingsRef().enable_lightweight_delete) throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Lightweight delete mutate is disabled. " "Set `enable_lightweight_delete` setting to enable it"); + if (metadata_snapshot->hasProjections()) + { + if (const auto * merge_tree_data = dynamic_cast(table.get())) + if (merge_tree_data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW) + throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, + "DELETE query is not allowed for table {} because as it has projections and setting " + "lightweight_mutation_projection_mode is set to THROW. 
" + "User should change lightweight_mutation_projection_mode OR " + "drop all the projections manually before running the query", + table_id.getFullTableName()); + } + /// Build "ALTER ... UPDATE _row_exists = 0 WHERE predicate" query String alter_query = "ALTER TABLE " + table->getStorageID().getFullTableName() @@ -94,79 +123,9 @@ BlockIO InterpreterDeleteQuery::execute() context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync)); InterpreterAlterQuery alter_interpreter(alter_ast, context); return alter_interpreter.execute(); - }; - - if (table->supportsDelete()) - { - /// Convert to MutationCommand - MutationCommands mutation_commands; - MutationCommand mut_command; - - mut_command.type = MutationCommand::Type::DELETE; - mut_command.predicate = delete_query.predicate; - - mutation_commands.emplace_back(mut_command); - - table->checkMutationIsPossible(mutation_commands, getContext()->getSettingsRef()); - MutationsInterpreter::Settings settings(false); - MutationsInterpreter(table, metadata_snapshot, mutation_commands, getContext(), settings).validate(); - table->mutate(mutation_commands, getContext()); - return {}; - } - else if (table->supportsLightweightDelete()) - { - return lightweightDelete(); } else { - if (table->hasProjection()) - { - auto context = Context::createCopy(getContext()); - auto mode = context->getSettingsRef().lightweight_mutation_projection_mode; - if (mode == LightweightMutationProjectionMode::THROW) - { - throw Exception(ErrorCodes::NOT_IMPLEMENTED, - "DELETE query is not supported for table {} as it has projections. " - "User should drop all the projections manually before running the query", - table->getStorageID().getFullTableName()); - } - else if (mode == LightweightMutationProjectionMode::DROP) - { - std::vector all_projections = metadata_snapshot->projections.getAllRegisteredNames(); - - context->setSetting("mutations_sync", Field(context->getSettingsRef().lightweight_deletes_sync)); - - /// Drop projections first so that lightweight delete can be performed. - for (const auto & projection : all_projections) - { - String alter_query = - "ALTER TABLE " + table->getStorageID().getFullTableName() - + (delete_query.cluster.empty() ? "" : " ON CLUSTER " + backQuoteIfNeed(delete_query.cluster)) - + " DROP PROJECTION IF EXISTS " + projection; - - ParserAlterQuery parser; - ASTPtr alter_ast = parseQuery( - parser, - alter_query.data(), - alter_query.data() + alter_query.size(), - "ALTER query", - 0, - DBMS_DEFAULT_MAX_PARSER_DEPTH, - DBMS_DEFAULT_MAX_PARSER_BACKTRACKS); - - InterpreterAlterQuery alter_interpreter(alter_ast, context); - alter_interpreter.execute(); - } - } - else - { - throw Exception(ErrorCodes::BAD_ARGUMENTS, - "Unrecognized lightweight_mutation_projection_mode, only throw and drop are allowed."); - } - - return lightweightDelete(); - } - throw Exception(ErrorCodes::BAD_ARGUMENTS, "DELETE query is not supported for table {}", table->getStorageID().getFullTableName()); diff --git a/src/Interpreters/InterpreterExplainQuery.cpp b/src/Interpreters/InterpreterExplainQuery.cpp index bedd9cb4a80..c820f999e0c 100644 --- a/src/Interpreters/InterpreterExplainQuery.cpp +++ b/src/Interpreters/InterpreterExplainQuery.cpp @@ -332,7 +332,7 @@ ExplainSettings checkAndGetSettings(const ASTPtr & ast_settings) if (settings.hasBooleanSetting(change.name)) { - auto value = change.value.get(); + auto value = change.value.safeGet(); if (value > 1) throw Exception(ErrorCodes::INVALID_SETTING_VALUE, "Invalid value {} for setting \"{}\". 
" "Expected boolean type", value, change.name); @@ -341,7 +341,7 @@ ExplainSettings checkAndGetSettings(const ASTPtr & ast_settings) } else { - auto value = change.value.get(); + auto value = change.value.safeGet(); settings.setIntegerSetting(change.name, value); } } diff --git a/src/Interpreters/InterpreterKillQueryQuery.cpp b/src/Interpreters/InterpreterKillQueryQuery.cpp index 7eb487ba7b3..2c579f3b468 100644 --- a/src/Interpreters/InterpreterKillQueryQuery.cpp +++ b/src/Interpreters/InterpreterKillQueryQuery.cpp @@ -334,7 +334,7 @@ BlockIO InterpreterKillQueryQuery::execute() for (size_t i = 0; i < moves_block.rows(); ++i) { table_id = StorageID{database_col.getDataAt(i).toString(), table_col.getDataAt(i).toString()}; - auto task_uuid = task_uuid_col[i].get(); + auto task_uuid = task_uuid_col[i].safeGet(); CancellationCode code = CancellationCode::Unknown; diff --git a/src/Interpreters/InterpreterSelectQuery.cpp b/src/Interpreters/InterpreterSelectQuery.cpp index cb42a8abf9c..0c79f4310ce 100644 --- a/src/Interpreters/InterpreterSelectQuery.cpp +++ b/src/Interpreters/InterpreterSelectQuery.cpp @@ -1232,7 +1232,7 @@ SortDescription InterpreterSelectQuery::getSortDescription(const ASTSelectQuery std::shared_ptr collator; if (order_by_elem.getCollation()) - collator = std::make_shared(order_by_elem.getCollation()->as().value.get()); + collator = std::make_shared(order_by_elem.getCollation()->as().value.safeGet()); if (order_by_elem.with_fill) { diff --git a/src/Interpreters/InterpreterSystemQuery.cpp b/src/Interpreters/InterpreterSystemQuery.cpp index c284acfa308..1cd55a0020c 100644 --- a/src/Interpreters/InterpreterSystemQuery.cpp +++ b/src/Interpreters/InterpreterSystemQuery.cpp @@ -663,13 +663,16 @@ BlockIO InterpreterSystemQuery::execute() startStopAction(ActionLocks::ViewRefresh, false); break; case Type::REFRESH_VIEW: - getRefreshTask()->run(); + for (const auto & task : getRefreshTasks()) + task->run(); break; case Type::CANCEL_VIEW: - getRefreshTask()->cancel(); + for (const auto & task : getRefreshTasks()) + task->cancel(); break; case Type::TEST_VIEW: - getRefreshTask()->setFakeTime(query.fake_time_for_view); + for (const auto & task : getRefreshTasks()) + task->setFakeTime(query.fake_time_for_view); break; case Type::DROP_REPLICA: dropReplica(query); @@ -710,14 +713,8 @@ BlockIO InterpreterSystemQuery::execute() case Type::FLUSH_LOGS: { getContext()->checkAccess(AccessType::SYSTEM_FLUSH_LOGS); - - auto logs = getContext()->getSystemLogs(); - std::vector> commands; - commands.reserve(logs.size()); - for (auto * system_log : logs) - commands.emplace_back([system_log] { system_log->flush(true); }); - - executeCommandsAndThrowIfError(commands); + auto system_logs = getContext()->getSystemLogs(); + system_logs.flush(true); break; } case Type::STOP_LISTEN: @@ -1248,15 +1245,15 @@ void InterpreterSystemQuery::flushDistributed(ASTSystemQuery & query) throw Exception(ErrorCodes::NOT_IMPLEMENTED, "SYSTEM RESTART DISK is not supported"); } -RefreshTaskHolder InterpreterSystemQuery::getRefreshTask() +RefreshTaskList InterpreterSystemQuery::getRefreshTasks() { auto ctx = getContext(); ctx->checkAccess(AccessType::SYSTEM_VIEWS); - auto task = ctx->getRefreshSet().getTask(table_id); - if (!task) + auto tasks = ctx->getRefreshSet().findTasks(table_id); + if (tasks.empty()) throw Exception( ErrorCodes::BAD_ARGUMENTS, "Refreshable view {} doesn't exist", table_id.getNameForLogs()); - return task; + return tasks; } diff --git a/src/Interpreters/InterpreterSystemQuery.h 
b/src/Interpreters/InterpreterSystemQuery.h index 776dd7915f0..f44fe930b04 100644 --- a/src/Interpreters/InterpreterSystemQuery.h +++ b/src/Interpreters/InterpreterSystemQuery.h @@ -74,7 +74,7 @@ private: void flushDistributed(ASTSystemQuery & query); [[noreturn]] void restartDisk(String & name); - RefreshTaskHolder getRefreshTask(); + RefreshTaskList getRefreshTasks(); AccessRightsElements getRequiredAccessForDDLOnCluster() const; void startStopAction(StorageActionBlockType action_type, bool start); diff --git a/src/Interpreters/MutationsInterpreter.cpp b/src/Interpreters/MutationsInterpreter.cpp index 5de33b3ed86..24635870e62 100644 --- a/src/Interpreters/MutationsInterpreter.cpp +++ b/src/Interpreters/MutationsInterpreter.cpp @@ -219,7 +219,7 @@ bool isStorageTouchedByMutations( Block tmp_block; while (executor.pull(tmp_block)); - auto count = (*block.getByName("count()").column)[0].get(); + auto count = (*block.getByName("count()").column)[0].safeGet(); return count != 0; } diff --git a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp index dd205ae6508..913f9900b77 100644 --- a/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp +++ b/src/Interpreters/OptimizeDateOrDateTimeConverterWithPreimageVisitor.cpp @@ -42,13 +42,13 @@ ASTPtr generateOptimizedDateFilterAST(const String & comparator, const NameAndTy if (isDateOrDate32(column.type.get())) { - start_date_or_date_time = date_lut.dateToString(range.first.get()); - end_date_or_date_time = date_lut.dateToString(range.second.get()); + start_date_or_date_time = date_lut.dateToString(range.first.safeGet()); + end_date_or_date_time = date_lut.dateToString(range.second.safeGet()); } else if (isDateTime(column.type.get()) || isDateTime64(column.type.get())) { - start_date_or_date_time = date_lut.timeToString(range.first.get()); - end_date_or_date_time = date_lut.timeToString(range.second.get()); + start_date_or_date_time = date_lut.timeToString(range.first.safeGet()); + end_date_or_date_time = date_lut.timeToString(range.second.safeGet()); } else [[unlikely]] return {}; diff --git a/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp index 48c9988b6fc..e9a663d53b0 100644 --- a/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp +++ b/src/Interpreters/OptimizeIfWithConstantConditionVisitor.cpp @@ -24,7 +24,7 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v if (literal->value.getType() == Field::Types::Int64 || literal->value.getType() == Field::Types::UInt64) { - value = literal->value.get(); + value = literal->value.safeGet(); return true; } if (literal->value.getType() == Field::Types::Null) @@ -51,7 +51,7 @@ static bool tryExtractConstValueFromCondition(const ASTPtr & condition, bool & v { if (type_literal->value.getType() == Field::Types::String) { - const auto & type_str = type_literal->value.get(); + const auto & type_str = type_literal->value.safeGet(); if (type_str == "UInt8" || type_str == "Nullable(UInt8)") return tryExtractConstValueFromCondition(expr_list->children.at(0), value); } diff --git a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp index 54515ea072a..86cec8659f5 100644 --- a/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp +++ b/src/Interpreters/OptimizeShardingKeyRewriteInVisitor.cpp @@ -72,7 +72,7 @@ bool shardContains( 
if (sharding_value.isNull()) return false; - UInt64 value = sharding_value.get(); + UInt64 value = sharding_value.safeGet(); const auto shard_num = data.slots[value % data.slots.size()] + 1; return data.shard_info.shard_num == shard_num; } @@ -120,11 +120,20 @@ void OptimizeShardingKeyRewriteInMatcher::visit(ASTFunction & function, Data & d else if (auto * tuple_literal = right->as(); tuple_literal && tuple_literal->value.getType() == Field::Types::Tuple) { - auto & tuple = tuple_literal->value.get(); - std::erase_if(tuple, [&](auto & child) + auto & tuple = tuple_literal->value.safeGet(); + if (tuple.size() > 1) { - return tuple.size() > 1 && !shardContains(child, name, data); - }); + Tuple new_tuple; + + for (auto & child : tuple) + if (shardContains(child, name, data)) + new_tuple.emplace_back(std::move(child)); + + if (new_tuple.empty()) + new_tuple.emplace_back(std::move(tuple.back())); + + tuple_literal->value = std::move(new_tuple); + } } } @@ -159,7 +168,7 @@ public: { if (isTuple(constant->getResultType())) { - const auto & tuple = constant->getValue().get(); + const auto & tuple = constant->getValue().safeGet(); Tuple new_tuple; new_tuple.reserve(tuple.size()); diff --git a/src/Interpreters/ProcessList.h b/src/Interpreters/ProcessList.h index accb73e12df..248ba947bc1 100644 --- a/src/Interpreters/ProcessList.h +++ b/src/Interpreters/ProcessList.h @@ -244,6 +244,9 @@ public: /// Same as checkTimeLimit but it never throws [[nodiscard]] bool checkTimeLimitSoft(); + /// Use it in case of the query left in background to execute asynchronously + void updateContext(ContextWeakPtr weak_context) { context = std::move(weak_context); } + /// Get the reference for the start of the query. Used to synchronize with other Stopwatches UInt64 getQueryCPUStartTime() { return watch.getStart(); } }; diff --git a/src/Interpreters/RewriteCountVariantsVisitor.cpp b/src/Interpreters/RewriteCountVariantsVisitor.cpp index 4a541c3765a..272e1ac735f 100644 --- a/src/Interpreters/RewriteCountVariantsVisitor.cpp +++ b/src/Interpreters/RewriteCountVariantsVisitor.cpp @@ -53,7 +53,7 @@ void RewriteCountVariantsVisitor::visit(ASTFunction & func) { if (first_arg_literal->value.getType() == Field::Types::UInt64) { - auto constant = first_arg_literal->value.get(); + auto constant = first_arg_literal->value.safeGet(); if (constant == 1 && !context->getSettingsRef().aggregate_functions_null_for_empty) transform = true; } diff --git a/src/Interpreters/Squashing.cpp b/src/Interpreters/Squashing.cpp index 483112df6cb..95b76c60063 100644 --- a/src/Interpreters/Squashing.cpp +++ b/src/Interpreters/Squashing.cpp @@ -5,7 +5,6 @@ #include #include - namespace DB { @@ -125,20 +124,32 @@ Chunk Squashing::squash(std::vector && input_chunks, Chunk::ChunkInfoColl { auto & first_chunk = input_chunks[0]; Columns columns = first_chunk.detachColumns(); + mutable_columns.reserve(columns.size()); for (auto & column : columns) - { mutable_columns.push_back(IColumn::mutate(std::move(column))); - mutable_columns.back()->reserve(rows); - } } + size_t num_columns = mutable_columns.size(); + /// Collect the list of source columns for each column. 
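+ /// Grouping the inputs per column first lets prepareForSquashing() reserve the final size in one go instead of growing
+ /// the destination on every insertRangeFrom() call below.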
+ std::vector source_columns_list(num_columns, Columns{}); + for (size_t i = 0; i != num_columns; ++i) + source_columns_list[i].reserve(input_chunks.size() - 1); + for (size_t i = 1; i < input_chunks.size(); ++i) // We've already processed the first chunk above { - Columns columns = input_chunks[i].detachColumns(); - for (size_t j = 0, size = mutable_columns.size(); j < size; ++j) + auto columns = input_chunks[i].detachColumns(); + for (size_t j = 0; j != num_columns; ++j) + source_columns_list[j].emplace_back(std::move(columns[j])); + } + + for (size_t i = 0; i != num_columns; ++i) + { + /// We know all the data we will insert in advance and can make all necessary pre-allocations. + mutable_columns[i]->prepareForSquashing(source_columns_list[i]); + for (auto & source_column : source_columns_list[i]) { - const auto source_column = columns[j]; - mutable_columns[j]->insertRangeFrom(*source_column, 0, source_column->size()); + auto column = std::move(source_column); + mutable_columns[i]->insertRangeFrom(*column, 0, column->size()); } } diff --git a/src/Interpreters/SystemLog.cpp b/src/Interpreters/SystemLog.cpp index 572481e6b12..832c39bfaf8 100644 --- a/src/Interpreters/SystemLog.cpp +++ b/src/Interpreters/SystemLog.cpp @@ -1,6 +1,7 @@ #include #include +#include #include #include #include @@ -49,6 +50,7 @@ #include + namespace DB { @@ -282,85 +284,21 @@ ASTPtr getCreateTableQueryClean(const StorageID & table_id, ContextPtr context) SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConfiguration & config) { - query_log = createSystemLog(global_context, "system", "query_log", config, "query_log", "Contains information about executed queries, for example, start time, duration of processing, error messages."); - query_thread_log = createSystemLog(global_context, "system", "query_thread_log", config, "query_thread_log", "Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing."); - part_log = createSystemLog(global_context, "system", "part_log", config, "part_log", "This table contains information about events that occurred with data parts in the MergeTree family tables, such as adding or merging data."); - trace_log = createSystemLog(global_context, "system", "trace_log", config, "trace_log", "Contains stack traces collected by the sampling query profiler."); - crash_log = createSystemLog(global_context, "system", "crash_log", config, "crash_log", "Contains information about stack traces for fatal errors. 
The table does not exist in the database by default, it is created only when fatal errors occur."); - text_log = createSystemLog(global_context, "system", "text_log", config, "text_log", "Contains logging entries which are normally written to a log file or to stdout."); - metric_log = createSystemLog(global_context, "system", "metric_log", config, "metric_log", "Contains history of metrics values from tables system.metrics and system.events, periodically flushed to disk."); - error_log = createSystemLog(global_context, "system", "error_log", config, "error_log", "Contains history of error values from table system.errors, periodically flushed to disk."); - filesystem_cache_log = createSystemLog(global_context, "system", "filesystem_cache_log", config, "filesystem_cache_log", "Contains a history of all events occurred with filesystem cache for objects on a remote filesystem."); - filesystem_read_prefetches_log = createSystemLog( - global_context, "system", "filesystem_read_prefetches_log", config, "filesystem_read_prefetches_log", "Contains a history of all prefetches done during reading from MergeTables backed by a remote filesystem."); - asynchronous_metric_log = createSystemLog( - global_context, "system", "asynchronous_metric_log", config, - "asynchronous_metric_log", "Contains the historical values for system.asynchronous_metrics, once per time interval (one second by default)."); - opentelemetry_span_log = createSystemLog( - global_context, "system", "opentelemetry_span_log", config, - "opentelemetry_span_log", "Contains information about trace spans for executed queries."); - query_views_log = createSystemLog(global_context, "system", "query_views_log", config, "query_views_log", "Contains information about the dependent views executed when running a query, for example, the view type or the execution time."); - zookeeper_log = createSystemLog(global_context, "system", "zookeeper_log", config, "zookeeper_log", "This table contains information about the parameters of the request to the ZooKeeper server and the response from it."); - session_log = createSystemLog(global_context, "system", "session_log", config, "session_log", "Contains information about all successful and failed login and logout events."); - transactions_info_log = createSystemLog( - global_context, "system", "transactions_info_log", config, "transactions_info_log", "Contains information about all transactions executed on a current server."); - processors_profile_log = createSystemLog(global_context, "system", "processors_profile_log", config, "processors_profile_log", "Contains profiling information on processors level (building blocks for a pipeline for query execution."); - asynchronous_insert_log = createSystemLog(global_context, "system", "asynchronous_insert_log", config, "asynchronous_insert_log", "Contains a history for all asynchronous inserts executed on current server."); - backup_log = createSystemLog(global_context, "system", "backup_log", config, "backup_log", "Contains logging entries with the information about BACKUP and RESTORE operations."); - s3_queue_log = createSystemLog(global_context, "system", "s3queue_log", config, "s3queue_log", "Contains logging entries with the information files processes by S3Queue engine."); - azure_queue_log = createSystemLog(global_context, "system", "azure_queue_log", config, "azure_queue_log", "Contains logging entries with the information files processes by S3Queue engine."); - blob_storage_log = createSystemLog(global_context, "system", "blob_storage_log", config, 
"blob_storage_log", "Contains logging entries with information about various blob storage operations such as uploads and deletes."); +/// NOLINTBEGIN(bugprone-macro-parentheses) +#define CREATE_PUBLIC_MEMBERS(log_type, member, descr) \ + member = createSystemLog(global_context, "system", #member, config, #member, descr); \ + + LIST_OF_ALL_SYSTEM_LOGS(CREATE_PUBLIC_MEMBERS) +#undef CREATE_PUBLIC_MEMBERS +/// NOLINTEND(bugprone-macro-parentheses) - if (query_log) - logs.emplace_back(query_log.get()); - if (query_thread_log) - logs.emplace_back(query_thread_log.get()); - if (part_log) - logs.emplace_back(part_log.get()); - if (trace_log) - logs.emplace_back(trace_log.get()); - if (crash_log) - logs.emplace_back(crash_log.get()); - if (text_log) - logs.emplace_back(text_log.get()); - if (metric_log) - logs.emplace_back(metric_log.get()); - if (error_log) - logs.emplace_back(error_log.get()); - if (asynchronous_metric_log) - logs.emplace_back(asynchronous_metric_log.get()); - if (opentelemetry_span_log) - logs.emplace_back(opentelemetry_span_log.get()); - if (query_views_log) - logs.emplace_back(query_views_log.get()); - if (zookeeper_log) - logs.emplace_back(zookeeper_log.get()); if (session_log) - { - logs.emplace_back(session_log.get()); global_context->addWarningMessage("Table system.session_log is enabled. It's unreliable and may contain garbage. Do not use it for any kind of security monitoring."); - } - if (transactions_info_log) - logs.emplace_back(transactions_info_log.get()); - if (processors_profile_log) - logs.emplace_back(processors_profile_log.get()); - if (filesystem_cache_log) - logs.emplace_back(filesystem_cache_log.get()); - if (filesystem_read_prefetches_log) - logs.emplace_back(filesystem_read_prefetches_log.get()); - if (asynchronous_insert_log) - logs.emplace_back(asynchronous_insert_log.get()); - if (backup_log) - logs.emplace_back(backup_log.get()); - if (s3_queue_log) - logs.emplace_back(s3_queue_log.get()); - if (blob_storage_log) - logs.emplace_back(blob_storage_log.get()); bool should_prepare = global_context->getServerSettings().prepare_system_log_tables_on_startup; try { - for (auto & log : logs) + for (auto & log : getAllLogs()) { log->startup(); if (should_prepare) @@ -394,20 +332,54 @@ SystemLogs::SystemLogs(ContextPtr global_context, const Poco::Util::AbstractConf } } - -SystemLogs::~SystemLogs() +std::vector SystemLogs::getAllLogs() const { +#define GET_RAW_POINTERS(log_type, member, descr) \ + (member).get(), \ + + std::vector result = { + LIST_OF_ALL_SYSTEM_LOGS(GET_RAW_POINTERS) + }; +#undef GET_RAW_POINTERS + + auto last_it = std::remove(result.begin(), result.end(), nullptr); + result.erase(last_it, result.end()); + + return result; +} + +void SystemLogs::flush(bool should_prepare_tables_anyway) +{ + auto logs = getAllLogs(); + std::vector logs_indexes(logs.size(), 0); + + for (size_t i = 0; i < logs.size(); ++i) + { + auto last_log_index = logs[i]->getLastLogIndex(); + logs_indexes[i] = last_log_index; + logs[i]->notifyFlush(last_log_index, should_prepare_tables_anyway); + } + + for (size_t i = 0; i < logs.size(); ++i) + logs[i]->flush(logs_indexes[i], should_prepare_tables_anyway); +} + +void SystemLogs::flushAndShutdown() +{ + flush(/* should_prepare_tables_anyway */ false); shutdown(); } void SystemLogs::shutdown() { + auto logs = getAllLogs(); for (auto & log : logs) log->shutdown(); } void SystemLogs::handleCrash() { + auto logs = getAllLogs(); for (auto & log : logs) log->handleCrash(); } @@ -462,33 +434,26 @@ void SystemLog::savingThreadFunction() 
{ setThreadName("SystemLogFlush"); - std::vector to_flush; - bool exit_this_thread = false; - while (!exit_this_thread) + while (true) { try { - // The end index (exclusive, like std end()) of the messages we are - // going to flush. - uint64_t to_flush_end = 0; - // Should we prepare table even if there are no new messages. - bool should_prepare_tables_anyway = false; + auto result = queue->pop(); - to_flush_end = queue->pop(to_flush, should_prepare_tables_anyway, exit_this_thread); - - if (to_flush.empty()) + if (result.is_shutdown) { - if (should_prepare_tables_anyway) - { - prepareTable(); - LOG_TRACE(log, "Table created (force)"); - - queue->confirm(to_flush_end); - } + LOG_TRACE(log, "Terminating"); + return; } - else + + if (!result.logs.empty()) { - flushImpl(to_flush, to_flush_end); + flushImpl(result.logs, result.last_log_index); + } + else if (result.create_table_force) + { + prepareTable(); + queue->confirm(result.last_log_index); } } catch (...) @@ -496,7 +461,6 @@ void SystemLog::savingThreadFunction() tryLogCurrentException(__PRETTY_FUNCTION__); } } - LOG_TRACE(log, "Terminating"); } diff --git a/src/Interpreters/SystemLog.h b/src/Interpreters/SystemLog.h index 0ac468b15ec..9e1af3578bd 100644 --- a/src/Interpreters/SystemLog.h +++ b/src/Interpreters/SystemLog.h @@ -5,6 +5,32 @@ #include #include +#include + +#define LIST_OF_ALL_SYSTEM_LOGS(M) \ + M(QueryLog, query_log, "Contains information about executed queries, for example, start time, duration of processing, error messages.") \ + M(QueryThreadLog, query_thread_log, "Contains information about threads that execute queries, for example, thread name, thread start time, duration of query processing.") \ + M(PartLog, part_log, "This table contains information about events that occurred with data parts in the MergeTree family tables, such as adding or merging data.") \ + M(TraceLog, trace_log, "Contains stack traces collected by the sampling query profiler.") \ + M(CrashLog, crash_log, "Contains information about stack traces for fatal errors. 
The table does not exist in the database by default, it is created only when fatal errors occur.") \ + M(TextLog, text_log, "Contains logging entries which are normally written to a log file or to stdout.") \ + M(MetricLog, metric_log, "Contains history of metrics values from tables system.metrics and system.events, periodically flushed to disk.") \ + M(ErrorLog, error_log, "Contains history of error values from table system.errors, periodically flushed to disk.") \ + M(FilesystemCacheLog, filesystem_cache_log, "Contains a history of all events occurred with filesystem cache for objects on a remote filesystem.") \ + M(FilesystemReadPrefetchesLog, filesystem_read_prefetches_log, "Contains a history of all prefetches done during reading from MergeTables backed by a remote filesystem.") \ + M(ObjectStorageQueueLog, s3queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ + M(ObjectStorageQueueLog, azure_queue_log, "Contains logging entries with the information files processes by S3Queue engine.") \ + M(AsynchronousMetricLog, asynchronous_metric_log, "Contains the historical values for system.asynchronous_metrics, once per time interval (one second by default).") \ + M(OpenTelemetrySpanLog, opentelemetry_span_log, "Contains information about trace spans for executed queries.") \ + M(QueryViewsLog, query_views_log, "Contains information about the dependent views executed when running a query, for example, the view type or the execution time.") \ + M(ZooKeeperLog, zookeeper_log, "This table contains information about the parameters of the request to the ZooKeeper server and the response from it.") \ + M(SessionLog, session_log, "Contains information about all successful and failed login and logout events.") \ + M(TransactionsInfoLog, transactions_info_log, "Contains information about all transactions executed on a current server.") \ + M(ProcessorsProfileLog, processors_profile_log, "Contains profiling information on processors level (building blocks for a pipeline for query execution.") \ + M(AsynchronousInsertLog, asynchronous_insert_log, "Contains a history for all asynchronous inserts executed on current server.") \ + M(BackupLog, backup_log, "Contains logging entries with the information about BACKUP and RESTORE operations.") \ + M(BlobStorageLog, blob_storage_log, "Contains logging entries with information about various blob storage operations such as uploads and deletes.") \ + namespace DB { @@ -34,71 +60,37 @@ namespace DB }; */ -class QueryLog; -class QueryThreadLog; -class PartLog; -class TextLog; -class TraceLog; -class CrashLog; -class ErrorLog; -class MetricLog; -class AsynchronousMetricLog; -class OpenTelemetrySpanLog; -class QueryViewsLog; -class ZooKeeperLog; -class SessionLog; -class TransactionsInfoLog; -class ProcessorsProfileLog; -class FilesystemCacheLog; -class FilesystemReadPrefetchesLog; -class AsynchronousInsertLog; -class BackupLog; -class ObjectStorageQueueLog; -class BlobStorageLog; +/// NOLINTBEGIN(bugprone-macro-parentheses) +#define FORWARD_DECLARATION(log_type, member, descr) \ + class log_type; \ + +LIST_OF_ALL_SYSTEM_LOGS(FORWARD_DECLARATION) +#undef FORWARD_DECLARATION +/// NOLINTEND(bugprone-macro-parentheses) + /// System logs should be destroyed in destructor of the last Context and before tables, /// because SystemLog destruction makes insert query while flushing data into underlying tables -struct SystemLogs +class SystemLogs { +public: + SystemLogs() = default; SystemLogs(ContextPtr global_context, const 
Poco::Util::AbstractConfiguration & config); - ~SystemLogs(); + SystemLogs(const SystemLogs & other) = default; + void flush(bool should_prepare_tables_anyway); + void flushAndShutdown(); void shutdown(); void handleCrash(); - std::shared_ptr query_log; /// Used to log queries. - std::shared_ptr query_thread_log; /// Used to log query threads. - std::shared_ptr part_log; /// Used to log operations with parts - std::shared_ptr trace_log; /// Used to log traces from query profiler - std::shared_ptr crash_log; /// Used to log server crashes. - std::shared_ptr text_log; /// Used to log all text messages. - std::shared_ptr metric_log; /// Used to log all metrics. - std::shared_ptr error_log; /// Used to log errors. - std::shared_ptr filesystem_cache_log; - std::shared_ptr filesystem_read_prefetches_log; - std::shared_ptr s3_queue_log; - std::shared_ptr azure_queue_log; - /// Metrics from system.asynchronous_metrics. - std::shared_ptr asynchronous_metric_log; - /// OpenTelemetry trace spans. - std::shared_ptr opentelemetry_span_log; - /// Used to log queries of materialized and live views - std::shared_ptr query_views_log; - /// Used to log all actions of ZooKeeper client - std::shared_ptr zookeeper_log; - /// Login, LogOut and Login failure events - std::shared_ptr session_log; - /// Events related to transactions - std::shared_ptr transactions_info_log; - /// Used to log processors profiling - std::shared_ptr processors_profile_log; - std::shared_ptr asynchronous_insert_log; - /// Backup and restore events - std::shared_ptr backup_log; - /// Log blob storage operations - std::shared_ptr blob_storage_log; +#define DECLARE_PUBLIC_MEMBERS(log_type, member, descr) \ + std::shared_ptr member; \ - std::vector logs; + LIST_OF_ALL_SYSTEM_LOGS(DECLARE_PUBLIC_MEMBERS) +#undef DECLARE_PUBLIC_MEMBERS + +private: + std::vector getAllLogs() const; }; struct SystemLogSettings diff --git a/src/Interpreters/TreeOptimizer.cpp b/src/Interpreters/TreeOptimizer.cpp index b872eb94fde..6483dd3be48 100644 --- a/src/Interpreters/TreeOptimizer.cpp +++ b/src/Interpreters/TreeOptimizer.cpp @@ -184,7 +184,7 @@ void optimizeGroupBy(ASTSelectQuery * select_query, ContextPtr context) const auto & value = group_exprs[i]->as()->value; if (value.getType() == Field::Types::UInt64) { - auto pos = value.get(); + auto pos = value.safeGet(); if (pos > 0 && pos <= select_query->select()->children.size()) keep_position = true; } diff --git a/src/Interpreters/WindowDescription.cpp b/src/Interpreters/WindowDescription.cpp index 31a881001e3..b1e12ff8048 100644 --- a/src/Interpreters/WindowDescription.cpp +++ b/src/Interpreters/WindowDescription.cpp @@ -94,8 +94,8 @@ void WindowFrame::checkValid() const if (begin_type == BoundaryType::Offset && !((begin_offset.getType() == Field::Types::UInt64 || begin_offset.getType() == Field::Types::Int64) - && begin_offset.get() >= 0 - && begin_offset.get() < INT_MAX)) + && begin_offset.safeGet() >= 0 + && begin_offset.safeGet() < INT_MAX)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Frame start offset for '{}' frame must be a nonnegative 32-bit integer, '{}' of type '{}' given", @@ -107,8 +107,8 @@ void WindowFrame::checkValid() const if (end_type == BoundaryType::Offset && !((end_offset.getType() == Field::Types::UInt64 || end_offset.getType() == Field::Types::Int64) - && end_offset.get() >= 0 - && end_offset.get() < INT_MAX)) + && end_offset.safeGet() >= 0 + && end_offset.safeGet() < INT_MAX)) { throw Exception(ErrorCodes::BAD_ARGUMENTS, "Frame end offset for '{}' frame must be a nonnegative 
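Most of the mechanical churn in the remaining hunks is the switch from Field::get to Field::safeGet. The simplified model below shows why: get trusts the caller about the stored type, while safeGet validates the active alternative and throws on a mismatch. MiniField is an illustrative stand-in, not the real DB::Field.

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>

// Simplified stand-in for DB::Field to illustrate the get vs safeGet difference.
struct MiniField
{
    std::variant<uint64_t, int64_t, std::string> value;

    template <typename T>
    T & get() { return *std::get_if<T>(&value); }   // unchecked: undefined behaviour on mismatch

    template <typename T>
    T & safeGet()
    {
        if (auto * ptr = std::get_if<T>(&value))
            return *ptr;
        throw std::runtime_error("bad Field access");   // checked: throws instead
    }
};

int main()
{
    MiniField f{.value = uint64_t{42}};
    std::cout << f.safeGet<uint64_t>() << '\n';   // ok: 42
    try { f.safeGet<std::string>(); }
    catch (const std::exception & e) { std::cout << e.what() << '\n'; }
}
```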
32-bit integer, '{}' of type '{}' given", diff --git a/src/Interpreters/castColumn.cpp b/src/Interpreters/castColumn.cpp index 906dfb84b14..a779c9bc34d 100644 --- a/src/Interpreters/castColumn.cpp +++ b/src/Interpreters/castColumn.cpp @@ -26,11 +26,9 @@ static ColumnPtr castColumn(CastType cast_type, const ColumnWithTypeAndName & ar "" } }; - auto get_cast_func = [cast_type, &arguments] + auto get_cast_func = [from = arg, to = type, cast_type] { - - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(cast_type, {}); - return func_builder_cast->build(arguments); + return createInternalCast(from, to, cast_type, {}); }; FunctionBasePtr func_cast = cache ? cache->getOrSet(cast_type, from_name, to_name, std::move(get_cast_func)) : get_cast_func(); diff --git a/src/Interpreters/convertFieldToType.cpp b/src/Interpreters/convertFieldToType.cpp index bee4b494958..738c51baa64 100644 --- a/src/Interpreters/convertFieldToType.cpp +++ b/src/Interpreters/convertFieldToType.cpp @@ -57,7 +57,7 @@ template Field convertNumericTypeImpl(const Field & from) { To result; - if (!accurate::convertNumeric(from.get(), result)) + if (!accurate::convertNumeric(from.safeGet(), result)) return {}; return result; } @@ -88,7 +88,7 @@ Field convertNumericType(const Field & from, const IDataType & type) template Field convertIntToDecimalType(const Field & from, const DataTypeDecimal & type) { - From value = from.get(); + From value = from.safeGet(); if (!type.canStoreWhole(value)) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Number is too big to place in {}", type.getName()); @@ -100,7 +100,7 @@ Field convertIntToDecimalType(const Field & from, const DataTypeDecimal & typ template Field convertStringToDecimalType(const Field & from, const DataTypeDecimal & type) { - const String & str_value = from.get(); + const String & str_value = from.safeGet(); T value = type.parseFromString(str_value); return DecimalField(value, type.getScale()); } @@ -108,7 +108,7 @@ Field convertStringToDecimalType(const Field & from, const DataTypeDecimal & template Field convertDecimalToDecimalType(const Field & from, const DataTypeDecimal & type) { - auto field = from.get>(); + auto field = from.safeGet>(); T value = convertDecimals, DataTypeDecimal>(field.getValue(), field.getScale(), type.getScale()); return DecimalField(value, type.getScale()); } @@ -116,7 +116,7 @@ Field convertDecimalToDecimalType(const Field & from, const DataTypeDecimal & template Field convertFloatToDecimalType(const Field & from, const DataTypeDecimal & type) { - From value = from.get(); + From value = from.safeGet(); if (!type.canStoreWhole(value)) throw Exception(ErrorCodes::ARGUMENT_OUT_OF_BOUND, "Number is too big to place in {}", type.getName()); @@ -182,24 +182,24 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID /// Conversion between Date and DateTime and vice versa. 
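The castColumn change keeps the same memoization shape: the cast function is built lazily by a factory lambda and cached by (cast_type, from, to). A small sketch of that getOrSet pattern is below; CastCache and the string "cast function" are illustrative placeholders for the real resolver and FunctionBase objects.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <tuple>

// Sketch of "build the cast lazily, memoize by (cast_type, from, to)".
struct CastCache
{
    std::map<std::tuple<int, std::string, std::string>, std::string> cache;

    template <typename Factory>
    const std::string & getOrSet(int cast_type, const std::string & from,
                                 const std::string & to, Factory && factory)
    {
        auto key = std::make_tuple(cast_type, from, to);
        auto [it, inserted] = cache.try_emplace(key);
        if (inserted)
            it->second = factory();   // only built on a cache miss
        return it->second;
    }
};

int main()
{
    CastCache cache;
    auto get_cast_func = [] { std::cout << "building cast\n"; return std::string{"UInt64 -> String"}; };

    // The second lookup hits the cache, so the factory runs exactly once.
    cache.getOrSet(0, "UInt64", "String", get_cast_func);
    std::cout << cache.getOrSet(0, "UInt64", "String", get_cast_func) << '\n';
}
```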
if (which_type.isDate() && which_from_type.isDateTime()) { - return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.get()).toUnderType()); + return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.safeGet()).toUnderType()); } else if (which_type.isDate32() && which_from_type.isDateTime()) { - return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.get()).toUnderType()); + return static_cast(static_cast(*from_type_hint).getTimeZone().toDayNum(src.safeGet()).toUnderType()); } else if (which_type.isDateTime() && which_from_type.isDate()) { - return static_cast(type).getTimeZone().fromDayNum(DayNum(src.get())); + return static_cast(type).getTimeZone().fromDayNum(DayNum(src.safeGet())); } else if (which_type.isDateTime() && which_from_type.isDate32()) { - return static_cast(type).getTimeZone().fromDayNum(DayNum(src.get())); + return static_cast(type).getTimeZone().fromDayNum(DayNum(src.safeGet())); } else if (which_type.isDateTime64() && which_from_type.isDate()) { const auto & date_time64_type = static_cast(type); - const auto value = date_time64_type.getTimeZone().fromDayNum(DayNum(src.get())); + const auto value = date_time64_type.getTimeZone().fromDayNum(DayNum(src.safeGet())); return DecimalField( DecimalUtils::decimalFromComponentsWithMultiplier(value, 0, date_time64_type.getScaleMultiplier()), date_time64_type.getScale()); @@ -207,7 +207,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID else if (which_type.isDateTime64() && which_from_type.isDate32()) { const auto & date_time64_type = static_cast(type); - const auto value = date_time64_type.getTimeZone().fromDayNum(ExtendedDayNum(static_cast(src.get()))); + const auto value = date_time64_type.getTimeZone().fromDayNum(ExtendedDayNum(static_cast(src.safeGet()))); return DecimalField( DecimalUtils::decimalFromComponentsWithMultiplier(value, 0, date_time64_type.getScaleMultiplier()), date_time64_type.getScale()); @@ -257,7 +257,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (which_type.isDateTime64() && src.getType() == Field::Types::Decimal64) { - const auto & from_type = src.get(); + const auto & from_type = src.safeGet(); const auto & to_type = static_cast(type); const auto scale_from = from_type.getScale(); @@ -304,8 +304,8 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } if (which_type.isIPv4() && src.getType() == Field::Types::UInt64) { - /// convert to UInt32 which is the underlying type for native IPv4 - return convertNumericType(src, type); + /// convert through UInt32 which is the underlying type for native IPv4 + return static_cast(convertNumericType(src, type).safeGet()); } } else if (which_type.isUUID() && src.getType() == Field::Types::UUID) @@ -322,7 +322,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (which_from_type.isFixedString() && assert_cast(from_type_hint)->getN() == IPV6_BINARY_LENGTH) { const auto col = type.createColumn(); - ReadBufferFromString in_buffer(src.get()); + ReadBufferFromString in_buffer(src.safeGet()); type.getDefaultSerialization()->deserializeBinary(*col, in_buffer, {}); return (*col)[0]; } @@ -334,7 +334,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (which_type.isFixedString()) { size_t n = assert_cast(type).getN(); - const auto & src_str = src.get(); + const auto & src_str = src.safeGet(); if (src_str.size() < n) { String src_str_extended = 
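As a rough arithmetic illustration of the Date to DateTime64 branch above (the real code uses the table's time zone and DecimalUtils::decimalFromComponentsWithMultiplier), the day number becomes seconds and is then scaled to DateTime64 ticks:

```cpp
#include <cstdint>
#include <iostream>

// Naive UTC model: 86400 seconds per day, then multiply by 10^scale ticks.
int main()
{
    uint16_t day_num = 19'000;                   // Date value: days since 1970-01-01
    int64_t seconds = int64_t{day_num} * 86'400;

    uint32_t scale = 3;                          // DateTime64(3) -> millisecond ticks
    int64_t scale_multiplier = 1;
    for (uint32_t i = 0; i < scale; ++i)
        scale_multiplier *= 10;

    int64_t date_time64_value = seconds * scale_multiplier;
    std::cout << date_time64_value << '\n';      // 1641600000000
}
```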
src_str; @@ -351,7 +351,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID { if (src.getType() == Field::Types::Array) { - const Array & src_arr = src.get(); + const Array & src_arr = src.safeGet(); size_t src_arr_size = src_arr.size(); const auto & element_type = *(type_array->getNestedType()); @@ -374,7 +374,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID { if (src.getType() == Field::Types::Tuple) { - const auto & src_tuple = src.get(); + const auto & src_tuple = src.safeGet(); size_t src_tuple_size = src_tuple.size(); size_t dst_tuple_size = type_tuple->getElements().size(); @@ -419,7 +419,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID const auto & key_type = *type_map->getKeyType(); const auto & value_type = *type_map->getValueType(); - const auto & map = src.get(); + const auto & map = src.safeGet(); size_t map_size = map.size(); Map res(map_size); @@ -428,7 +428,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID for (size_t i = 0; i < map_size; ++i) { - const auto & map_entry = map[i].get(); + const auto & map_entry = map[i].safeGet(); const auto & key = map_entry[0]; const auto & value = map_entry[1]; @@ -457,7 +457,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID "Cannot convert {} to {}", src.getTypeName(), agg_func_type->getName()); - const auto & name = src.get().name; + const auto & name = src.safeGet().name; if (agg_func_type->getName() != name) throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert {} to {}", name, agg_func_type->getName()); @@ -472,7 +472,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (src.getType() == Field::Types::Tuple && from_type_tuple && from_type_tuple->haveExplicitNames()) { const auto & names = from_type_tuple->getElementNames(); - const auto & tuple = src.get(); + const auto & tuple = src.safeGet(); if (names.size() != tuple.size()) throw Exception(ErrorCodes::TYPE_MISMATCH, @@ -489,10 +489,10 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID if (src.getType() == Field::Types::Map) { Object object; - const auto & map = src.get(); + const auto & map = src.safeGet(); for (const auto & element : map) { - const auto & map_entry = element.get(); + const auto & map_entry = element.safeGet(); const auto & key = map_entry[0]; const auto & value = map_entry[1]; @@ -500,7 +500,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert from Map with key of type {} to Object", key.getTypeName()); - object[key.get()] = value; + object[key.safeGet()] = value; } return object; @@ -541,7 +541,7 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID } const auto col = type_to_parse->createColumn(); - ReadBufferFromString in_buffer(src.get()); + ReadBufferFromString in_buffer(src.safeGet()); try { type_to_parse->getDefaultSerialization()->deserializeWholeText(*col, in_buffer, FormatSettings{}); @@ -549,9 +549,9 @@ Field convertFieldToTypeImpl(const Field & src, const IDataType & type, const ID catch (Exception & e) { if (e.code() == ErrorCodes::UNEXPECTED_DATA_AFTER_PARSED_VALUE) - throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert string '{}' to type {}", src.get(), type.getName()); + throw Exception(ErrorCodes::TYPE_MISMATCH, "Cannot convert string '{}' to type {}", src.safeGet(), 
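The Map to Object branch touched further down walks the map's (key, value) entries and only accepts String keys as Object paths. A heavily simplified picture of that loop, with invented MiniValue/convertMapToObject names:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>
#include <utility>
#include <variant>
#include <vector>

// A Map field is stored as a list of (key, value) entries; only String keys
// may become Object paths, anything else is a type mismatch.
using MiniValue = std::variant<uint64_t, std::string>;
using MapEntry = std::pair<MiniValue, MiniValue>;

std::map<std::string, MiniValue> convertMapToObject(const std::vector<MapEntry> & map)
{
    std::map<std::string, MiniValue> object;
    for (const auto & [key, value] : map)
    {
        const auto * key_str = std::get_if<std::string>(&key);
        if (!key_str)
            throw std::runtime_error("Cannot convert from Map with non-String key to Object");
        object[*key_str] = value;
    }
    return object;
}

int main()
{
    auto object = convertMapToObject({{std::string{"a"}, uint64_t{1}},
                                      {std::string{"b"}, std::string{"x"}}});
    std::cout << object.size() << '\n';   // 2
}
```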
type.getName()); - e.addMessage(fmt::format("while converting '{}' to {}", src.get(), type.getName())); + e.addMessage(fmt::format("while converting '{}' to {}", src.safeGet(), type.getName())); throw; } @@ -614,7 +614,7 @@ Field convertFieldToTypeOrThrow(const Field & from_value, const IDataType & to_t template static bool decimalEqualsFloat(Field field, Float64 float_value) { - auto decimal_field = field.get>(); + auto decimal_field = field.safeGet>(); auto decimal_to_float = DecimalUtils::convertTo(decimal_field.getValue(), decimal_field.getScale()); return decimal_to_float == float_value; } @@ -633,13 +633,13 @@ std::optional convertFieldToTypeStrict(const Field & from_value, const ID { /// Convert back to Float64 and compare if (result_value.getType() == Field::Types::Decimal32) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; if (result_value.getType() == Field::Types::Decimal64) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; if (result_value.getType() == Field::Types::Decimal128) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; if (result_value.getType() == Field::Types::Decimal256) - return decimalEqualsFloat(result_value, from_value.get()) ? result_value : std::optional{}; + return decimalEqualsFloat(result_value, from_value.safeGet()) ? result_value : std::optional{}; throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown decimal type {}", result_value.getTypeName()); } diff --git a/src/Interpreters/evaluateConstantExpression.cpp b/src/Interpreters/evaluateConstantExpression.cpp index 4bfc80af1fe..d4bb0cc2f8a 100644 --- a/src/Interpreters/evaluateConstantExpression.cpp +++ b/src/Interpreters/evaluateConstantExpression.cpp @@ -297,7 +297,7 @@ namespace { if (tuple_literal->value.getType() == Field::Types::Tuple) { - const auto & tuple = tuple_literal->value.get(); + const auto & tuple = tuple_literal->value.safeGet(); for (const auto & child : tuple) { const auto dnf = analyzeEquals(identifier, child, expr); @@ -792,7 +792,7 @@ std::optional evaluateExpressionOverConstantCondition(const ASTPtr & nod else if (const auto * literal = node->as()) { // Check if it's always true or false. - if (literal->value.getType() == Field::Types::UInt64 && literal->value.get() == 0) + if (literal->value.getType() == Field::Types::UInt64 && literal->value.safeGet() == 0) return {result}; else return {}; diff --git a/src/Interpreters/joinDispatch.h b/src/Interpreters/joinDispatch.h index 54c5c7dc83a..4aabc49c29b 100644 --- a/src/Interpreters/joinDispatch.h +++ b/src/Interpreters/joinDispatch.h @@ -12,38 +12,53 @@ namespace DB { -template +/// HashJoin::MapsOne is more efficient, it only store one row for each key in the map. It is recommended to use it whenever possible. +/// When only need to match only one row from right table, use HashJoin::MapsOne. For example, LEFT ANY/SEMI/ANTI. +/// +/// HashJoin::MapsAll will store all rows for each key in the map. It is used when need to match multiple rows from right table. +/// For example, LEFT ALL, INNER ALL, RIGHT ALL/ANY. +/// +/// prefer_use_maps_all is true when there is mixed inequal condition in the join condition. 
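The decimalEqualsFloat helper used by convertFieldToTypeStrict implements a round-trip check: a Float64 literal is only accepted as a Decimal when converting the produced decimal back to Float64 reproduces the original value exactly. A simplified model (the real code uses DecimalUtils::convertTo and the field's scale):

```cpp
#include <cstdint>
#include <iostream>

bool decimalEqualsFloat(int64_t decimal_value, double scale_multiplier, double float_value)
{
    double decimal_to_float = static_cast<double>(decimal_value) / scale_multiplier;
    return decimal_to_float == float_value;
}

int main()
{
    // 1.5 with scale 2 is stored as 150, and 150 / 100 == 1.5 exactly: accepted.
    std::cout << decimalEqualsFloat(150, 100.0, 1.5) << '\n';   // 1

    // A value that does not round-trip is rejected by the strict conversion.
    std::cout << decimalEqualsFloat(150, 100.0, 1.6) << '\n';   // 0
}
```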
For example, `t1.a = t2.a AND t1.b > t2.b`. +/// In this case, we need to use HashJoin::MapsAll to store all rows for each key in the map. We will select all matched rows from the map +/// and filter them by `t1.b > t2.b`. +/// +/// flagged indicates whether we need to store flags for each row whether it has been used in the join. See JoinUsedFlags.h. +template struct MapGetter; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; /// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. 
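To make the new prefer_use_maps_all parameter easier to follow, here is a toy model of the MapGetter idea: ANY-style joins normally keep one row per key (MapsOne), but an extra inequality in the ON clause forces the all-rows map (MapsAll) so candidates can be filtered afterwards. The enums and traits below are heavily reduced stand-ins, not the real specializations.

```cpp
#include <iostream>
#include <type_traits>

enum class Kind { Left, Inner };
enum class Strictness { Any, All };

struct MapsOne { static constexpr const char * name = "MapsOne"; };
struct MapsAll { static constexpr const char * name = "MapsAll"; };

template <Kind kind, Strictness strictness, bool prefer_use_maps_all>
struct MapGetter
{
    // ANY joins normally need a single matching row per key; ALL joins and the
    // mixed-condition case need every row for the key.
    static constexpr bool needs_all = prefer_use_maps_all || strictness == Strictness::All;
    using Map = std::conditional_t<needs_all, MapsAll, MapsOne>;
};

int main()
{
    std::cout << MapGetter<Kind::Left, Strictness::Any, false>::Map::name << '\n';   // MapsOne
    std::cout << MapGetter<Kind::Left, Strictness::Any, true>::Map::name << '\n';    // MapsAll
    std::cout << MapGetter<Kind::Inner, Strictness::All, false>::Map::name << '\n';  // MapsAll
}
```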
-template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -/// Only SEMI LEFT and SEMI RIGHT are valid. INNER and FULL are here for templates instantiation. -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; -template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +/// Only ANTI LEFT and ANTI RIGHT are valid. INNER and FULL are here for templates instantiation. +template <> struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template <> struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; +template struct MapGetter { using Map = HashJoin::MapsAll; static constexpr bool flagged = true; }; +template struct MapGetter { using Map = HashJoin::MapsOne; static constexpr bool flagged = false; }; -template -struct MapGetter { using Map = HashJoin::MapsAsof; static constexpr bool flagged = false; }; +template +struct MapGetter { using Map = HashJoin::MapsAsof; static constexpr bool flagged = false; }; static constexpr std::array STRICTNESSES = { JoinStrictness::RightAny, @@ -62,7 +77,7 @@ static constexpr std::array KINDS = { }; /// Init specified join map -inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin::MapsVariant & maps) +inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin::MapsVariant & maps, bool prefer_use_maps_all = false) { return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) { @@ -70,7 +85,10 @@ inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin: constexpr auto j = ij % STRICTNESSES.size(); if (kind == KINDS[i] && strictness == STRICTNESSES[j]) { - maps = typename MapGetter::Map(); + if (prefer_use_maps_all) + maps = typename MapGetter::Map(); + else + maps = typename MapGetter::Map(); return true; } return false; @@ -79,7 +97,7 @@ inline bool joinDispatchInit(JoinKind kind, JoinStrictness strictness, HashJoin: /// Call function on specified join map template -inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & maps, Func && func) +inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & maps, bool prefer_use_maps_all, Func && func) { return static_for<0, KINDS.size() * 
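joinDispatchInit iterates the KINDS x STRICTNESSES grid at compile time and, when the runtime pair matches a cell, default-constructs the corresponding map alternative inside the variant. The following is a minimal sketch of that static_for dispatch under invented, much smaller enums; only the mechanism is the point.

```cpp
#include <array>
#include <cstddef>
#include <iostream>
#include <type_traits>
#include <utility>
#include <variant>

enum class Kind { Left, Inner };
enum class Strictness { Any, All };

struct MapsOne { };
struct MapsAll { };
using MapsVariant = std::variant<MapsOne, MapsAll>;

template <Kind kind, Strictness strictness>
using MapFor = std::conditional_t<strictness == Strictness::All, MapsAll, MapsOne>;

constexpr std::array KINDS = {Kind::Left, Kind::Inner};
constexpr std::array STRICTNESSES = {Strictness::Any, Strictness::All};

// Unrolls f over 0..N-1 at compile time, stopping at the first true result.
template <std::size_t... I, typename F>
bool staticFor(std::index_sequence<I...>, F && f)
{
    return (f(std::integral_constant<std::size_t, I>{}) || ...);
}

bool joinDispatchInit(Kind kind, Strictness strictness, MapsVariant & maps)
{
    return staticFor(std::make_index_sequence<KINDS.size() * STRICTNESSES.size()>{}, [&](auto ij)
    {
        constexpr auto i = ij / STRICTNESSES.size();
        constexpr auto j = ij % STRICTNESSES.size();
        if (kind == KINDS[i] && strictness == STRICTNESSES[j])
        {
            maps = MapFor<KINDS[i], STRICTNESSES[j]>{};   // pick the right alternative
            return true;
        }
        return false;
    });
}

int main()
{
    MapsVariant maps;
    joinDispatchInit(Kind::Inner, Strictness::All, maps);
    std::cout << maps.index() << '\n';   // 1 -> MapsAll
}
```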
STRICTNESSES.size()>([&](auto ij) { @@ -89,10 +107,16 @@ inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & constexpr auto j = ij % STRICTNESSES.size(); if (kind == KINDS[i] && strictness == STRICTNESSES[j]) { - func( - std::integral_constant(), - std::integral_constant(), - std::get::Map>(maps)); + if (prefer_use_maps_all) + func( + std::integral_constant(), + std::integral_constant(), + std::get::Map>(maps)); + else + func( + std::integral_constant(), + std::integral_constant(), + std::get::Map>(maps)); return true; } return false; @@ -101,7 +125,7 @@ inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, MapsVariant & /// Call function on specified join map template -inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, std::vector & mapsv, Func && func) +inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, std::vector & mapsv, bool prefer_use_maps_all, Func && func) { return static_for<0, KINDS.size() * STRICTNESSES.size()>([&](auto ij) { @@ -111,17 +135,31 @@ inline bool joinDispatch(JoinKind kind, JoinStrictness strictness, std::vector::Map; - std::vector v; - v.reserve(mapsv.size()); - for (const auto & el : mapsv) - v.push_back(&std::get(*el)); + if (prefer_use_maps_all) + { + using MapType = typename MapGetter::Map; + std::vector v; + v.reserve(mapsv.size()); + for (const auto & el : mapsv) + v.push_back(&std::get(*el)); - func( - std::integral_constant(), - std::integral_constant(), - v - /*std::get::Map>(maps)*/); + func( + std::integral_constant(), std::integral_constant(), v + /*std::get::Map>(maps)*/); + } + else + { + using MapType = typename MapGetter::Map; + std::vector v; + v.reserve(mapsv.size()); + for (const auto & el : mapsv) + v.push_back(&std::get(*el)); + + func( + std::integral_constant(), std::integral_constant(), v + /*std::get::Map>(maps)*/); + + } return true; } return false; diff --git a/src/Interpreters/replaceForPositionalArguments.cpp b/src/Interpreters/replaceForPositionalArguments.cpp index 3d60723a167..ee967f45c74 100644 --- a/src/Interpreters/replaceForPositionalArguments.cpp +++ b/src/Interpreters/replaceForPositionalArguments.cpp @@ -35,11 +35,11 @@ bool replaceForPositionalArguments(ASTPtr & argument, const ASTSelectQuery * sel if (which == Field::Types::UInt64) { - pos = ast_literal->value.get(); + pos = ast_literal->value.safeGet(); } else if (which == Field::Types::Int64) { - auto value = ast_literal->value.get(); + auto value = ast_literal->value.safeGet(); if (value > 0) pos = value; else diff --git a/src/Interpreters/tests/gtest_comparison_graph.cpp b/src/Interpreters/tests/gtest_comparison_graph.cpp index ac24a8de368..5f93bb983c1 100644 --- a/src/Interpreters/tests/gtest_comparison_graph.cpp +++ b/src/Interpreters/tests/gtest_comparison_graph.cpp @@ -29,7 +29,7 @@ TEST(ComparisonGraph, Bounds) const auto & [lower, strict] = *res; - ASSERT_EQ(lower.get(), 3); + ASSERT_EQ(lower.safeGet(), 3); ASSERT_TRUE(strict); } @@ -39,7 +39,7 @@ TEST(ComparisonGraph, Bounds) const auto & [upper, strict] = *res; - ASSERT_EQ(upper.get(), 7); + ASSERT_EQ(upper.safeGet(), 7); ASSERT_TRUE(strict); } diff --git a/src/Parsers/ASTFunction.cpp b/src/Parsers/ASTFunction.cpp index cd9e910d45a..d42728addb7 100644 --- a/src/Parsers/ASTFunction.cpp +++ b/src/Parsers/ASTFunction.cpp @@ -522,7 +522,7 @@ void ASTFunction::formatImplWithoutAlias(const FormatSettings & settings, Format if (tuple_arguments_valid && lit_right) { if (isInt64OrUInt64FieldType(lit_right->value.getType()) - && 
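The replaceForPositionalArguments hunk below only swaps get for safeGet, but the surrounding logic is easy to miss: a positive integer literal in GROUP BY / ORDER BY is treated as a 1-based index into the SELECT list. A small illustrative model:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

std::string replaceForPositionalArgument(int64_t literal, const std::vector<std::string> & select_list)
{
    if (literal <= 0 || static_cast<std::size_t>(literal) > select_list.size())
        throw std::out_of_range("positional argument out of bounds");
    return select_list[static_cast<std::size_t>(literal) - 1];   // 1-based -> 0-based
}

int main()
{
    std::vector<std::string> select_list = {"user_id", "count()"};
    std::cout << replaceForPositionalArgument(2, select_list) << '\n';   // count()
}
```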
lit_right->value.get() >= 0) + && lit_right->value.safeGet() >= 0) { if (frame.need_parens) settings.ostr << '('; diff --git a/src/Parsers/ASTIndexDeclaration.h b/src/Parsers/ASTIndexDeclaration.h index dd05ad08184..72f3f017a99 100644 --- a/src/Parsers/ASTIndexDeclaration.h +++ b/src/Parsers/ASTIndexDeclaration.h @@ -13,8 +13,7 @@ class ASTIndexDeclaration : public IAST { public: static const auto DEFAULT_INDEX_GRANULARITY = 1uz; - static const auto DEFAULT_ANNOY_INDEX_GRANULARITY = 100'000'000uz; - static const auto DEFAULT_USEARCH_INDEX_GRANULARITY = 100'000'000uz; + static const auto DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY = 100'000'000uz; ASTIndexDeclaration(ASTPtr expression, ASTPtr type, const String & name_); diff --git a/src/Parsers/ASTLiteral.cpp b/src/Parsers/ASTLiteral.cpp index 8dedc5dc95d..515f4f0cb9f 100644 --- a/src/Parsers/ASTLiteral.cpp +++ b/src/Parsers/ASTLiteral.cpp @@ -73,8 +73,8 @@ void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const /// Special case for very large arrays and tuples. Instead of listing all elements, will use hash of them. /// (Otherwise column name will be too long, that will lead to significant slowdown of expression analysis.) auto type = value.getType(); - if ((type == Field::Types::Array && value.get().size() > min_elements_for_hashing) - || (type == Field::Types::Tuple && value.get().size() > min_elements_for_hashing)) + if ((type == Field::Types::Array && value.safeGet().size() > min_elements_for_hashing) + || (type == Field::Types::Tuple && value.safeGet().size() > min_elements_for_hashing)) { SipHash hash; applyVisitor(FieldVisitorHash(hash), value); @@ -92,7 +92,7 @@ void ASTLiteral::appendColumnNameImpl(WriteBuffer & ostr) const /// for tons of literals as it creates temporary String. if (value.getType() == Field::Types::String) { - writeQuoted(value.get(), ostr); + writeQuoted(value.safeGet(), ostr); } else { @@ -110,7 +110,7 @@ void ASTLiteral::appendColumnNameImplLegacy(WriteBuffer & ostr) const /// Special case for very large arrays. Instead of listing all elements, will use hash of them. /// (Otherwise column name will be too long, that will lead to significant slowdown of expression analysis.) auto type = value.getType(); - if ((type == Field::Types::Array && value.get().size() > min_elements_for_hashing)) + if ((type == Field::Types::Array && value.safeGet().size() > min_elements_for_hashing)) { SipHash hash; applyVisitor(FieldVisitorHash(hash), value); diff --git a/src/Parsers/ASTSystemQuery.cpp b/src/Parsers/ASTSystemQuery.cpp index a730ea0ba3d..7780544d5c2 100644 --- a/src/Parsers/ASTSystemQuery.cpp +++ b/src/Parsers/ASTSystemQuery.cpp @@ -198,6 +198,29 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s print_database_table(); } + if (sync_replica_mode != SyncReplicaMode::DEFAULT) + { + settings.ostr << ' '; + print_keyword(magic_enum::enum_name(sync_replica_mode)); + + // If the mode is LIGHTWEIGHT and specific source replicas are specified + if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty()) + { + settings.ostr << ' '; + print_keyword("FROM"); + settings.ostr << ' '; + + bool first = true; + for (const auto & src : src_replicas) + { + if (!first) + settings.ostr << ", "; + first = false; + settings.ostr << quoteString(src); + } + } + } + if (query_settings) { settings.ostr << (settings.hilite ? hilite_keyword : "") << settings.nl_or_ws << "SETTINGS " << (settings.hilite ? 
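The ASTLiteral hunk keeps the existing "very large literal" optimization: once an array or tuple crosses min_elements_for_hashing, the generated column name is a hash of the contents rather than a listing of every element. A rough sketch of that branch, using std::hash as a stand-in for the SipHash used in ClickHouse:

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

std::string literalColumnName(const std::vector<int> & array)
{
    constexpr std::size_t min_elements_for_hashing = 1000;
    std::ostringstream out;
    if (array.size() > min_elements_for_hashing)
    {
        std::size_t hash = 0;
        for (int value : array)
            hash = std::hash<int>{}(value) + 0x9e3779b9 + (hash << 6) + (hash >> 2);  // simple combine
        out << "__array_" << array.size() << "_" << hash;   // short, stable name
    }
    else
    {
        out << "[";
        for (std::size_t i = 0; i < array.size(); ++i)
            out << (i ? ", " : "") << array[i];
        out << "]";
    }
    return out.str();
}

int main()
{
    std::cout << literalColumnName({1, 2, 3}) << '\n';                  // [1, 2, 3]
    std::cout << literalColumnName(std::vector<int>(2000, 7)) << '\n';  // hashed name
}
```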
hilite_none : ""); @@ -233,28 +256,6 @@ void ASTSystemQuery::formatImpl(const FormatSettings & settings, FormatState & s print_identifier(disk); } - if (sync_replica_mode != SyncReplicaMode::DEFAULT) - { - settings.ostr << ' '; - print_keyword(magic_enum::enum_name(sync_replica_mode)); - - // If the mode is LIGHTWEIGHT and specific source replicas are specified - if (sync_replica_mode == SyncReplicaMode::LIGHTWEIGHT && !src_replicas.empty()) - { - settings.ostr << ' '; - print_keyword("FROM"); - settings.ostr << ' '; - - bool first = true; - for (const auto & src : src_replicas) - { - if (!first) - settings.ostr << ", "; - first = false; - settings.ostr << quoteString(src); - } - } - } break; } case Type::SYNC_DATABASE_REPLICA: diff --git a/src/Parsers/Access/ASTGrantQuery.cpp b/src/Parsers/Access/ASTGrantQuery.cpp index f60fa7e4a23..eac88c75513 100644 --- a/src/Parsers/Access/ASTGrantQuery.cpp +++ b/src/Parsers/Access/ASTGrantQuery.cpp @@ -97,24 +97,9 @@ namespace void formatCurrentGrantsElements(const AccessRightsElements & elements, const IAST::FormatSettings & settings) { - for (size_t i = 0; i != elements.size(); ++i) - { - const auto & element = elements[i]; - - bool next_element_on_same_db_and_table = false; - if (i != elements.size() - 1) - { - const auto & next_element = elements[i + 1]; - if (element.sameDatabaseAndTableAndParameter(next_element)) - next_element_on_same_db_and_table = true; - } - - if (!next_element_on_same_db_and_table) - { - settings.ostr << " "; - formatONClause(element, settings); - } - } + settings.ostr << "("; + formatElementsWithoutOptions(elements, settings); + settings.ostr << ")"; } } diff --git a/src/Parsers/Access/ParserCreateQuotaQuery.cpp b/src/Parsers/Access/ParserCreateQuotaQuery.cpp index ddfdbe38903..ddf4e9ecda5 100644 --- a/src/Parsers/Access/ParserCreateQuotaQuery.cpp +++ b/src/Parsers/Access/ParserCreateQuotaQuery.cpp @@ -114,7 +114,7 @@ namespace T fieldToNumber(const Field & f) { if (f.getType() == Field::Types::String) - return parseWithSizeSuffix(boost::algorithm::trim_copy(f.get())); + return parseWithSizeSuffix(boost::algorithm::trim_copy(f.safeGet())); else return applyVisitor(FieldVisitorConvertToNumber(), f); } diff --git a/src/Parsers/ExpressionElementParsers.cpp b/src/Parsers/ExpressionElementParsers.cpp index 9927acdcf17..de395d120d7 100644 --- a/src/Parsers/ExpressionElementParsers.cpp +++ b/src/Parsers/ExpressionElementParsers.cpp @@ -314,7 +314,7 @@ bool ParserCompoundIdentifier::parseImpl(Pos & pos, ASTPtr & node, Expected & ex ASTPtr ast_uuid; if (!uuid_p.parse(pos, ast_uuid, expected)) return false; - uuid = parseFromString(ast_uuid->as()->value.get()); + uuid = parseFromString(ast_uuid->as()->value.safeGet()); } if (parts.size() == 1) node = std::make_shared(parts[0], std::move(params)); @@ -1626,7 +1626,7 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e if (!parser_string_literal.parse(pos, ast_prefix_name, expected)) return false; - column_name_prefix = ast_prefix_name->as().value.get(); + column_name_prefix = ast_prefix_name->as().value.safeGet(); } if (with_open_round_bracket) @@ -1689,7 +1689,7 @@ bool ParserColumnsTransformers::parseImpl(Pos & pos, ASTPtr & node, Expected & e auto res = std::make_shared(); if (regexp_node) - res->setPattern(regexp_node->as().value.get()); + res->setPattern(regexp_node->as().value.safeGet()); else res->children = std::move(identifiers); res->is_strict = is_strict; @@ -1861,7 +1861,7 @@ static bool parseColumnsMatcherBody(IParser::Pos & pos, ASTPtr & 
node, Expected else { auto regexp_matcher = std::make_shared(); - regexp_matcher->setPattern(regexp_node->as().value.get()); + regexp_matcher->setPattern(regexp_node->as().value.safeGet()); if (!transformers->children.empty()) { @@ -2310,7 +2310,7 @@ bool ParserTTLElement::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) if (!parser_string_literal.parse(pos, ast_space_name, expected)) return false; - destination_name = ast_space_name->as().value.get(); + destination_name = ast_space_name->as().value.safeGet(); } else if (mode == TTLMode::GROUP_BY) { diff --git a/src/Parsers/ParserAlterQuery.cpp b/src/Parsers/ParserAlterQuery.cpp index dbefb0cb966..73fd563faf6 100644 --- a/src/Parsers/ParserAlterQuery.cpp +++ b/src/Parsers/ParserAlterQuery.cpp @@ -517,7 +517,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_space_name, expected)) return false; - command->move_destination_name = ast_space_name->as().value.get(); + command->move_destination_name = ast_space_name->as().value.safeGet(); } else if (s_move_partition.ignore(pos, expected)) { @@ -545,7 +545,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_space_name, expected)) return false; - command->move_destination_name = ast_space_name->as().value.get(); + command->move_destination_name = ast_space_name->as().value.safeGet(); } } else if (s_add_constraint.ignore(pos, expected)) @@ -638,7 +638,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_from, expected)) return false; - command->from = ast_from->as().value.get(); + command->from = ast_from->as().value.safeGet(); command->type = ASTAlterCommand::FETCH_PARTITION; } else if (s_fetch_part.ignore(pos, expected)) @@ -652,7 +652,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected ASTPtr ast_from; if (!parser_string_literal.parse(pos, ast_from, expected)) return false; - command->from = ast_from->as().value.get(); + command->from = ast_from->as().value.safeGet(); command->part = true; command->type = ASTAlterCommand::FETCH_PARTITION; } @@ -680,7 +680,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_with_name, expected)) return false; - command->with_name = ast_with_name->as().value.get(); + command->with_name = ast_with_name->as().value.safeGet(); } } else if (s_unfreeze.ignore(pos, expected)) @@ -707,7 +707,7 @@ bool ParserAlterCommand::parseImpl(Pos & pos, ASTPtr & node, Expected & expected if (!parser_string_literal.parse(pos, ast_with_name, expected)) return false; - command->with_name = ast_with_name->as().value.get(); + command->with_name = ast_with_name->as().value.safeGet(); } else { diff --git a/src/Parsers/ParserCheckQuery.cpp b/src/Parsers/ParserCheckQuery.cpp index 42716ba7f2c..33b6a5a1ac2 100644 --- a/src/Parsers/ParserCheckQuery.cpp +++ b/src/Parsers/ParserCheckQuery.cpp @@ -55,7 +55,7 @@ bool ParserCheckQuery::parseCheckTable(Pos & pos, ASTPtr & node, Expected & expe const auto * ast_literal = ast_part_name->as(); if (!ast_literal || ast_literal->value.getType() != Field::Types::String) return false; - query->part_name = ast_literal->value.get(); + query->part_name = ast_literal->value.safeGet(); } if (query->database) diff --git a/src/Parsers/ParserCreateIndexQuery.cpp b/src/Parsers/ParserCreateIndexQuery.cpp index 
9ebee4cc852..ed89b80edca 100644 --- a/src/Parsers/ParserCreateIndexQuery.cpp +++ b/src/Parsers/ParserCreateIndexQuery.cpp @@ -89,10 +89,8 @@ bool ParserCreateIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected else { auto index_type = index->getType(); - if (index_type && index_type->name == "annoy") - index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY; - else if (index_type && index_type->name == "usearch") - index->granularity = ASTIndexDeclaration::DEFAULT_USEARCH_INDEX_GRANULARITY; + if (index_type && index_type->name == "vector_similarity") + index->granularity = ASTIndexDeclaration::DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY; else index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY; } diff --git a/src/Parsers/ParserCreateQuery.cpp b/src/Parsers/ParserCreateQuery.cpp index 66965903ab0..cc4e02f46a3 100644 --- a/src/Parsers/ParserCreateQuery.cpp +++ b/src/Parsers/ParserCreateQuery.cpp @@ -214,10 +214,8 @@ bool ParserIndexDeclaration::parseImpl(Pos & pos, ASTPtr & node, Expected & expe else { auto index_type = index->getType(); - if (index_type->name == "annoy") - index->granularity = ASTIndexDeclaration::DEFAULT_ANNOY_INDEX_GRANULARITY; - else if (index_type->name == "usearch") - index->granularity = ASTIndexDeclaration::DEFAULT_USEARCH_INDEX_GRANULARITY; + if (index_type->name == "vector_similarity") + index->granularity = ASTIndexDeclaration::DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY; else index->granularity = ASTIndexDeclaration::DEFAULT_INDEX_GRANULARITY; } @@ -931,7 +929,7 @@ bool ParserCreateTableQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & expe query->is_create_empty = is_create_empty; if (from_path) - query->attach_from_path = from_path->as().value.get(); + query->attach_from_path = from_path->as().value.safeGet(); return true; } @@ -1440,7 +1438,7 @@ bool ParserCreateDatabaseQuery::parseImpl(Pos & pos, ASTPtr & node, Expected & e ASTPtr ast_uuid; if (!uuid_p.parse(pos, ast_uuid, expected)) return false; - uuid = parseFromString(ast_uuid->as()->value.get()); + uuid = parseFromString(ast_uuid->as()->value.safeGet()); } if (s_on.ignore(pos, expected)) diff --git a/src/Parsers/ParserDictionary.cpp b/src/Parsers/ParserDictionary.cpp index 83a006231d9..ce38d1b54d1 100644 --- a/src/Parsers/ParserDictionary.cpp +++ b/src/Parsers/ParserDictionary.cpp @@ -33,7 +33,7 @@ bool ParserDictionaryLifetime::parseImpl(Pos & pos, ASTPtr & node, Expected & ex if (literal.value.getType() != Field::Types::UInt64) return false; - res->max_sec = literal.value.get(); + res->max_sec = literal.value.safeGet(); node = res; return true; } @@ -58,10 +58,10 @@ bool ParserDictionaryLifetime::parseImpl(Pos & pos, ASTPtr & node, Expected & ex return false; if (pair.first == "min") - res->min_sec = literal->value.get(); + res->min_sec = literal->value.safeGet(); else if (pair.first == "max") { - res->max_sec = literal->value.get(); + res->max_sec = literal->value.safeGet(); initialized_max = true; } else diff --git a/src/Parsers/ParserPartition.cpp b/src/Parsers/ParserPartition.cpp index 80a28f4803e..ab97b3d0e3b 100644 --- a/src/Parsers/ParserPartition.cpp +++ b/src/Parsers/ParserPartition.cpp @@ -65,7 +65,7 @@ bool ParserPartition::parseImpl(Pos & pos, ASTPtr & node, Expected & expected) { if (literal_ast->value.getType() == Field::Types::Tuple) { - fields_count = literal_ast->value.get().size(); + fields_count = literal_ast->value.safeGet().size(); } else { diff --git a/src/Parsers/ParserSystemQuery.cpp b/src/Parsers/ParserSystemQuery.cpp index 
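After the annoy/usearch consolidation, the granularity defaulting in both parsers reduces to one check on the new vector_similarity index type. The tiny sketch below restates that decision with the constants visible in the diff (1 and 100'000'000):

```cpp
#include <cstddef>
#include <iostream>
#include <string>

// Vector indexes default to a huge granularity (effectively one granule per
// part); every other skip index defaults to 1.
std::size_t defaultIndexGranularity(const std::string & index_type)
{
    constexpr std::size_t DEFAULT_INDEX_GRANULARITY = 1;
    constexpr std::size_t DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY = 100'000'000;
    return index_type == "vector_similarity"
        ? DEFAULT_VECTOR_SIMILARITY_INDEX_GRANULARITY
        : DEFAULT_INDEX_GRANULARITY;
}

int main()
{
    std::cout << defaultIndexGranularity("vector_similarity") << '\n';  // 100000000
    std::cout << defaultIndexGranularity("minmax") << '\n';             // 1
}
```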
0545c3e5568..81b64ab47c6 100644 --- a/src/Parsers/ParserSystemQuery.cpp +++ b/src/Parsers/ParserSystemQuery.cpp @@ -445,7 +445,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & ASTPtr ast; if (!ParserStringLiteral{}.parse(pos, ast, expected)) return false; - String time_str = ast->as().value.get(); + String time_str = ast->as().value.safeGet(); ReadBufferFromString buf(time_str); time_t time; readDateTimeText(time, buf); @@ -467,7 +467,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & return false; } - res->seconds = seconds->as()->value.get(); + res->seconds = seconds->as()->value.safeGet(); break; } case Type::DROP_FILESYSTEM_CACHE: @@ -538,7 +538,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & ASTPtr ast; if (ParserKeyword{Keyword::WITH_NAME}.ignore(pos, expected) && ParserStringLiteral{}.parse(pos, ast, expected)) { - res->backup_name = ast->as().value.get(); + res->backup_name = ast->as().value.safeGet(); } else { @@ -577,7 +577,7 @@ bool ParserSystemQuery::parseImpl(IParser::Pos & pos, ASTPtr & node, Expected & if (!ParserStringLiteral{}.parse(pos, ast, expected)) return false; - custom_name = ast->as().value.get(); + custom_name = ast->as().value.safeGet(); } return true; diff --git a/src/Parsers/ParserUndropQuery.cpp b/src/Parsers/ParserUndropQuery.cpp index 07ca8a3b5fd..57da47df70d 100644 --- a/src/Parsers/ParserUndropQuery.cpp +++ b/src/Parsers/ParserUndropQuery.cpp @@ -41,7 +41,7 @@ bool parseUndropQuery(IParser::Pos & pos, ASTPtr & node, Expected & expected) ASTPtr ast_uuid; if (!uuid_p.parse(pos, ast_uuid, expected)) return false; - uuid = parseFromString(ast_uuid->as()->value.get()); + uuid = parseFromString(ast_uuid->as()->value.safeGet()); } if (ParserKeyword{Keyword::ON}.ignore(pos, expected)) { diff --git a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt index 20fd951d390..74fdcff79f7 100644 --- a/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt +++ b/src/Parsers/fuzzers/codegen_fuzzer/CMakeLists.txt @@ -39,7 +39,7 @@ set(CMAKE_INCLUDE_CURRENT_DIR TRUE) clickhouse_add_executable(codegen_select_fuzzer ${FUZZER_SRCS}) -set_source_files_properties("${PROTO_SRCS}" "out.cpp" PROPERTIES COMPILE_FLAGS "-Wno-reserved-identifier") +set_source_files_properties("${PROTO_SRCS}" "out.cpp" PROPERTIES COMPILE_FLAGS "-Wno-reserved-identifier -Wno-extra-semi-stmt -Wno-used-but-marked-unused") # contrib/libprotobuf-mutator/src/libfuzzer/libfuzzer_macro.h:143:44: error: no newline at end of file [-Werror,-Wnewline-eof] target_compile_options (codegen_select_fuzzer PRIVATE -Wno-newline-eof) diff --git a/src/Parsers/tests/gtest_dictionary_parser.cpp b/src/Parsers/tests/gtest_dictionary_parser.cpp index a1ba46125a7..af3591750a1 100644 --- a/src/Parsers/tests/gtest_dictionary_parser.cpp +++ b/src/Parsers/tests/gtest_dictionary_parser.cpp @@ -56,21 +56,21 @@ TEST(ParserDictionaryDDL, SimpleDictionary) EXPECT_EQ(create->dictionary->source->name, "clickhouse"); auto children = create->dictionary->source->elements->children; EXPECT_EQ(children[0]->as() -> first, "host"); - EXPECT_EQ(children[0]->as()->second->as()->value.get(), "localhost"); + EXPECT_EQ(children[0]->as()->second->as()->value.safeGet(), "localhost"); EXPECT_EQ(children[1]->as()->first, "port"); - EXPECT_EQ(children[1]->as()->second->as()->value.get(), 9000); + EXPECT_EQ(children[1]->as()->second->as()->value.safeGet(), 9000); EXPECT_EQ(children[2]->as()->first, "user"); - 
EXPECT_EQ(children[2]->as()->second->as()->value.get(), "default"); + EXPECT_EQ(children[2]->as()->second->as()->value.safeGet(), "default"); EXPECT_EQ(children[3]->as()->first, "password"); - EXPECT_EQ(children[3]->as()->second->as()->value.get(), ""); + EXPECT_EQ(children[3]->as()->second->as()->value.safeGet(), ""); EXPECT_EQ(children[4]->as()->first, "db"); - EXPECT_EQ(children[4]->as()->second->as()->value.get(), "test"); + EXPECT_EQ(children[4]->as()->second->as()->value.safeGet(), "test"); EXPECT_EQ(children[5]->as()->first, "table"); - EXPECT_EQ(children[5]->as()->second->as()->value.get(), "table_for_dict"); + EXPECT_EQ(children[5]->as()->second->as()->value.safeGet(), "table_for_dict"); /// layout test auto * layout = create->dictionary->layout; @@ -102,9 +102,9 @@ TEST(ParserDictionaryDDL, SimpleDictionary) EXPECT_EQ(attributes_children[1]->as()->name, "second_column"); EXPECT_EQ(attributes_children[2]->as()->name, "third_column"); - EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.get(), 0); - EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.get(), 1); - EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.get(), 2); + EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.safeGet(), 0); + EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.safeGet(), 1); + EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.safeGet(), 2); EXPECT_EQ(attributes_children[0]->as()->expression, nullptr); EXPECT_EQ(attributes_children[1]->as()->expression, nullptr); @@ -150,8 +150,8 @@ TEST(ParserDictionaryDDL, AttributesWithMultipleProperties) EXPECT_EQ(attributes_children[2]->as()->name, "third_column"); EXPECT_EQ(attributes_children[0]->as()->default_value, nullptr); - EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.get(), 1); - EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.get(), 2); + EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.safeGet(), 1); + EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.safeGet(), 2); EXPECT_EQ(attributes_children[0]->as()->expression, nullptr); EXPECT_EQ(attributes_children[1]->as()->expression, nullptr); @@ -195,9 +195,9 @@ TEST(ParserDictionaryDDL, CustomAttributePropertiesOrder) EXPECT_EQ(attributes_children[1]->as()->name, "second_column"); EXPECT_EQ(attributes_children[2]->as()->name, "third_column"); - EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.get(), 100); - EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.get(), 1); - EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.get(), 2); + EXPECT_EQ(attributes_children[0]->as()->default_value->as()->value.safeGet(), 100); + EXPECT_EQ(attributes_children[1]->as()->default_value->as()->value.safeGet(), 1); + EXPECT_EQ(attributes_children[2]->as()->default_value->as()->value.safeGet(), 2); EXPECT_EQ(attributes_children[0]->as()->expression, nullptr); EXPECT_EQ(attributes_children[1]->as()->expression, nullptr); @@ -248,25 +248,25 @@ TEST(ParserDictionaryDDL, NestedSource) auto children = create->dictionary->source->elements->children; EXPECT_EQ(children[0]->as()->first, "host"); - EXPECT_EQ(children[0]->as()->second->as()->value.get(), "localhost"); + EXPECT_EQ(children[0]->as()->second->as()->value.safeGet(), "localhost"); EXPECT_EQ(children[1]->as()->first, "port"); - EXPECT_EQ(children[1]->as()->second->as()->value.get(), 9000); + EXPECT_EQ(children[1]->as()->second->as()->value.safeGet(), 
9000); EXPECT_EQ(children[2]->as()->first, "user"); - EXPECT_EQ(children[2]->as()->second->as()->value.get(), "default"); + EXPECT_EQ(children[2]->as()->second->as()->value.safeGet(), "default"); EXPECT_EQ(children[3]->as()->first, "replica"); auto replica = children[3]->as()->second->children; EXPECT_EQ(replica[0]->as()->first, "host"); - EXPECT_EQ(replica[0]->as()->second->as()->value.get(), "127.0.0.1"); + EXPECT_EQ(replica[0]->as()->second->as()->value.safeGet(), "127.0.0.1"); EXPECT_EQ(replica[1]->as()->first, "priority"); - EXPECT_EQ(replica[1]->as()->second->as()->value.get(), 1); + EXPECT_EQ(replica[1]->as()->second->as()->value.safeGet(), 1); EXPECT_EQ(children[4]->as()->first, "password"); - EXPECT_EQ(children[4]->as()->second->as()->value.get(), ""); + EXPECT_EQ(children[4]->as()->second->as()->value.safeGet(), ""); } diff --git a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp index 58bf4c1a2fc..30301b242db 100644 --- a/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp +++ b/src/Processors/Formats/Impl/CHColumnToArrowColumn.cpp @@ -185,7 +185,7 @@ namespace DB } else { - auto value = static_cast(column[value_i].get>().getValue()); + auto value = static_cast(column[value_i].safeGet>().getValue()); if (need_rescale) { if (common::mulOverflow(value, rescale_multiplier, value)) diff --git a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp index 06e8668cd7c..566a036d79c 100644 --- a/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp +++ b/src/Processors/Formats/Impl/ConstantExpressionTemplate.cpp @@ -208,20 +208,20 @@ private: /// Do not replace empty array and array of NULLs if (literal->value.getType() == Field::Types::Array) { - const Array & array = literal->value.get(); + const Array & array = literal->value.safeGet(); auto not_null = std::find_if_not(array.begin(), array.end(), [](const auto & elem) { return elem.isNull(); }); if (not_null == array.end()) return true; } else if (literal->value.getType() == Field::Types::Map) { - const Map & map = literal->value.get(); + const Map & map = literal->value.safeGet(); if (map.size() % 2) return false; } else if (literal->value.getType() == Field::Types::Tuple) { - const Tuple & tuple = literal->value.get(); + const Tuple & tuple = literal->value.safeGet(); for (const auto & value : tuple) if (value.isNull()) diff --git a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp index 649721f28bf..58bec8120f1 100644 --- a/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/NativeORCBlockInputFormat.cpp @@ -262,7 +262,7 @@ convertFieldToORCLiteral(const orc::Type & orc_type, const Field & field, DataTy { case orc::BOOLEAN: { /// May throw exception - auto val = field.get(); + auto val = field.safeGet(); return orc::Literal(val != 0); } case orc::BYTE: @@ -275,7 +275,7 @@ convertFieldToORCLiteral(const orc::Type & orc_type, const Field & field, DataTy /// SELECT * FROM file('t.orc', ORC, 'x UInt8') WHERE x > 10 /// We have to reject this, otherwise it would miss values > 127 (because /// they're treated as negative by ORC). 
- auto val = field.get(); + auto val = field.safeGet(); return orc::Literal(val); } case orc::FLOAT: diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp index 6f543a05fba..4a7a23158ff 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.cpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -203,25 +204,15 @@ template void ORCBlockOutputFormat::writeNumbers( orc::ColumnVectorBatch & orc_column, const IColumn & column, - const PaddedPODArray * null_bytemap, + const PaddedPODArray * /*null_bytemap*/, ConvertFunc convert) { NumberVectorBatch & number_orc_column = dynamic_cast(orc_column); const auto & number_column = assert_cast &>(column); - number_orc_column.resize(number_column.size()); + number_orc_column.data.resize(number_column.size()); for (size_t i = 0; i != number_column.size(); ++i) - { - if (null_bytemap && (*null_bytemap)[i]) - { - number_orc_column.notNull[i] = 0; - continue; - } - - number_orc_column.notNull[i] = 1; number_orc_column.data[i] = convert(number_column.getElement(i)); - } - number_orc_column.numElements = number_column.size(); } template @@ -229,7 +220,7 @@ void ORCBlockOutputFormat::writeDecimals( orc::ColumnVectorBatch & orc_column, const IColumn & column, DataTypePtr & type, - const PaddedPODArray * null_bytemap, + const PaddedPODArray * /*null_bytemap*/, ConvertFunc convert) { DecimalVectorBatch & decimal_orc_column = dynamic_cast(orc_column); @@ -238,71 +229,49 @@ void ORCBlockOutputFormat::writeDecimals( decimal_orc_column.precision = decimal_type->getPrecision(); decimal_orc_column.scale = decimal_type->getScale(); decimal_orc_column.resize(decimal_column.size()); - for (size_t i = 0; i != decimal_column.size(); ++i) - { - if (null_bytemap && (*null_bytemap)[i]) - { - decimal_orc_column.notNull[i] = 0; - continue; - } - decimal_orc_column.notNull[i] = 1; + decimal_orc_column.values.resize(decimal_column.size()); + for (size_t i = 0; i != decimal_column.size(); ++i) decimal_orc_column.values[i] = convert(decimal_column.getElement(i).value); - } - decimal_orc_column.numElements = decimal_column.size(); } template void ORCBlockOutputFormat::writeStrings( orc::ColumnVectorBatch & orc_column, const IColumn & column, - const PaddedPODArray * null_bytemap) + const PaddedPODArray * /*null_bytemap*/) { orc::StringVectorBatch & string_orc_column = dynamic_cast(orc_column); const auto & string_column = assert_cast(column); - string_orc_column.resize(string_column.size()); + string_orc_column.data.resize(string_column.size()); + string_orc_column.length.resize(string_column.size()); for (size_t i = 0; i != string_column.size(); ++i) { - if (null_bytemap && (*null_bytemap)[i]) - { - string_orc_column.notNull[i] = 0; - continue; - } - - string_orc_column.notNull[i] = 1; const std::string_view & string = string_column.getDataAt(i).toView(); string_orc_column.data[i] = const_cast(string.data()); string_orc_column.length[i] = string.size(); } - string_orc_column.numElements = string_column.size(); } template void ORCBlockOutputFormat::writeDateTimes( orc::ColumnVectorBatch & orc_column, const IColumn & column, - const PaddedPODArray * null_bytemap, + const PaddedPODArray * /*null_bytemap*/, GetSecondsFunc get_seconds, GetNanosecondsFunc get_nanoseconds) { orc::TimestampVectorBatch & timestamp_orc_column = dynamic_cast(orc_column); const auto & timestamp_column = assert_cast(column); - 
timestamp_orc_column.resize(timestamp_column.size()); + timestamp_orc_column.data.resize(timestamp_column.size()); + timestamp_orc_column.nanoseconds.resize(timestamp_column.size()); for (size_t i = 0; i != timestamp_column.size(); ++i) { - if (null_bytemap && (*null_bytemap)[i]) - { - timestamp_orc_column.notNull[i] = 0; - continue; - } - - timestamp_orc_column.notNull[i] = 1; timestamp_orc_column.data[i] = static_cast(get_seconds(timestamp_column.getElement(i))); timestamp_orc_column.nanoseconds[i] = static_cast(get_nanoseconds(timestamp_column.getElement(i))); } - timestamp_orc_column.numElements = timestamp_column.size(); } void ORCBlockOutputFormat::writeColumn( @@ -311,9 +280,27 @@ void ORCBlockOutputFormat::writeColumn( DataTypePtr & type, const PaddedPODArray * null_bytemap) { - orc_column.notNull.resize(column.size()); + size_t rows = column.size(); + orc_column.resize(rows); + orc_column.numElements = rows; + + /// Calculate orc_column.hasNulls if (null_bytemap) - orc_column.hasNulls = true; + orc_column.hasNulls = !memoryIsZero(null_bytemap->data(), 0, null_bytemap->size()); + else + orc_column.hasNulls = false; + + /// Fill orc_column.notNull + if (orc_column.hasNulls) + { + for (size_t i = 0; i < rows; ++i) + orc_column.notNull[i] = !(*null_bytemap)[i]; + } + else + { + for (size_t i = 0; i < rows; ++i) + orc_column.notNull[i] = 1; + } /// ORC doesn't have unsigned types, so cast everything to signed and sign-extend to Int64 to /// make the ORC library calculate min and max correctly. @@ -471,6 +458,7 @@ void ORCBlockOutputFormat::writeColumn( } case TypeIndex::Nullable: { + chassert(!null_bytemap); const auto & nullable_column = assert_cast(column); const PaddedPODArray & new_null_bytemap = assert_cast &>(*nullable_column.getNullMapColumnPtr()).getData(); auto nested_type = removeNullable(type); @@ -485,19 +473,15 @@ void ORCBlockOutputFormat::writeColumn( const ColumnArray::Offsets & offsets = list_column.getOffsets(); size_t column_size = list_column.size(); - list_orc_column.resize(column_size); + list_orc_column.offsets.resize(column_size + 1); /// The length of list i in ListVectorBatch is offsets[i+1] - offsets[i]. list_orc_column.offsets[0] = 0; for (size_t i = 0; i != column_size; ++i) - { list_orc_column.offsets[i + 1] = offsets[i]; - list_orc_column.notNull[i] = 1; - } orc::ColumnVectorBatch & nested_orc_column = *list_orc_column.elements; - writeColumn(nested_orc_column, list_column.getData(), nested_type, null_bytemap); - list_orc_column.numElements = column_size; + writeColumn(nested_orc_column, list_column.getData(), nested_type, nullptr); break; } case TypeIndex::Tuple: @@ -505,10 +489,8 @@ void ORCBlockOutputFormat::writeColumn( orc::StructVectorBatch & struct_orc_column = dynamic_cast(orc_column); const auto & tuple_column = assert_cast(column); auto nested_types = assert_cast(type.get())->getElements(); - for (size_t i = 0; i != tuple_column.size(); ++i) - struct_orc_column.notNull[i] = 1; for (size_t i = 0; i != tuple_column.tupleSize(); ++i) - writeColumn(*struct_orc_column.fields[i], tuple_column.getColumn(i), nested_types[i], null_bytemap); + writeColumn(*struct_orc_column.fields[i], tuple_column.getColumn(i), nested_types[i], nullptr); break; } case TypeIndex::Map: @@ -520,25 +502,21 @@ void ORCBlockOutputFormat::writeColumn( size_t column_size = list_column.size(); - map_orc_column.resize(list_column.size()); + map_orc_column.offsets.resize(column_size + 1); /// The length of list i in ListVectorBatch is offsets[i+1] - offsets[i]. 
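The writeNumbers/writeDecimals/writeStrings/writeDateTimes hunks above delete their per-value null handling, and the rewritten writeColumn above fills hasNulls and notNull once for the whole batch, using memoryIsZero to detect whether the bytemap contains any NULLs at all. A minimal standalone sketch of that logic (ToyBatch and fillNotNull are stand-ins, not the ORC classes; the ClickHouse null bytemap stores 1 for NULL rows, while ORC's notNull stores 1 for present rows):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct ToyBatch                 /// stand-in for orc::ColumnVectorBatch
{
    bool hasNulls = false;
    std::vector<char> notNull;
    uint64_t numElements = 0;
};

void fillNotNull(ToyBatch & batch, size_t rows, const std::vector<uint8_t> * null_bytemap)
{
    batch.notNull.resize(rows);
    batch.numElements = rows;

    /// hasNulls is true only if at least one byte of the bytemap is non-zero
    /// (the patch uses memoryIsZero() for the same check).
    batch.hasNulls = null_bytemap
        && std::any_of(null_bytemap->begin(), null_bytemap->end(), [](uint8_t b) { return b != 0; });

    for (size_t i = 0; i < rows; ++i)
        batch.notNull[i] = batch.hasNulls ? !(*null_bytemap)[i] : 1;
}

int main()
{
    ToyBatch batch;
    std::vector<uint8_t> null_bytemap = {0, 1, 0};   /// second row is NULL
    fillNotNull(batch, 3, &null_bytemap);
    /// batch.hasNulls == true, batch.notNull == {1, 0, 1}
}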
map_orc_column.offsets[0] = 0; for (size_t i = 0; i != column_size; ++i) - { map_orc_column.offsets[i + 1] = offsets[i]; - map_orc_column.notNull[i] = 1; - } + const auto nested_columns = assert_cast(list_column.getDataPtr().get())->getColumns(); orc::ColumnVectorBatch & keys_orc_column = *map_orc_column.keys; auto key_type = map_type.getKeyType(); - writeColumn(keys_orc_column, *nested_columns[0], key_type, null_bytemap); + writeColumn(keys_orc_column, *nested_columns[0], key_type, nullptr); orc::ColumnVectorBatch & values_orc_column = *map_orc_column.elements; auto value_type = map_type.getValueType(); - writeColumn(values_orc_column, *nested_columns[1], value_type, null_bytemap); - - map_orc_column.numElements = column_size; + writeColumn(values_orc_column, *nested_columns[1], value_type, nullptr); break; } default: @@ -546,27 +524,6 @@ void ORCBlockOutputFormat::writeColumn( } } -size_t ORCBlockOutputFormat::getColumnSize(const IColumn & column, DataTypePtr & type) -{ - if (type->getTypeId() == TypeIndex::Array) - { - auto nested_type = assert_cast(*type).getNestedType(); - const IColumn & nested_column = assert_cast(column).getData(); - return std::max(column.size(), getColumnSize(nested_column, nested_type)); - } - - return column.size(); -} - -size_t ORCBlockOutputFormat::getMaxColumnSize(Chunk & chunk) -{ - size_t columns_num = chunk.getNumColumns(); - size_t max_column_size = 0; - for (size_t i = 0; i != columns_num; ++i) - max_column_size = std::max(max_column_size, getColumnSize(*chunk.getColumns()[i], data_types[i])); - return max_column_size; -} - void ORCBlockOutputFormat::consume(Chunk chunk) { if (!writer) @@ -575,10 +532,7 @@ void ORCBlockOutputFormat::consume(Chunk chunk) size_t columns_num = chunk.getNumColumns(); size_t rows_num = chunk.getNumRows(); - /// getMaxColumnSize is needed to write arrays. - /// The size of the batch must be no less than total amount of array elements - /// and no less than the number of rows (ORC writes a null bit for every row). 
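Both the Array and Map branches above now size only the offsets of the top-level batch, while the nested element batches are resized inside their own writeColumn calls. That is why the getColumnSize/getMaxColumnSize helpers are deleted and, just below, consume() can size the row batch by chunk.getNumRows() alone. A worked example of the offsets translation, with illustrative values that are not taken from the diff:

/// Three array rows [[1,2], [], [3,4,5]]:
///   ClickHouse ColumnArray offsets (cumulative end position per row): {2, 2, 5}
///   ORC ListVectorBatch offsets (length of row i = offsets[i+1] - offsets[i]): {0, 2, 2, 5}
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

int main()
{
    std::vector<uint64_t> ch_offsets = {2, 2, 5};          /// per-row end positions
    std::vector<int64_t> orc_offsets(ch_offsets.size() + 1);

    orc_offsets[0] = 0;
    for (size_t i = 0; i != ch_offsets.size(); ++i)
        orc_offsets[i + 1] = static_cast<int64_t>(ch_offsets[i]);

    assert(orc_offsets == (std::vector<int64_t>{0, 2, 2, 5}));
    /// The parent batch needs only ch_offsets.size() == 3 entries (one per row);
    /// the nested element batch is resized to 5 in its own writeColumn call,
    /// which is why a row-count-based createRowBatch() is now sufficient.
}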
- std::unique_ptr batch = writer->createRowBatch(getMaxColumnSize(chunk)); + std::unique_ptr batch = writer->createRowBatch(chunk.getNumRows()); orc::StructVectorBatch & root = dynamic_cast(*batch); auto columns = chunk.detachColumns(); diff --git a/src/Processors/Formats/Impl/ORCBlockOutputFormat.h b/src/Processors/Formats/Impl/ORCBlockOutputFormat.h index 28837193d1a..06ecac9b820 100644 --- a/src/Processors/Formats/Impl/ORCBlockOutputFormat.h +++ b/src/Processors/Formats/Impl/ORCBlockOutputFormat.h @@ -69,11 +69,6 @@ private: void writeColumn(orc::ColumnVectorBatch & orc_column, const IColumn & column, DataTypePtr & type, const PaddedPODArray * null_bytemap); - /// These two functions are needed to know maximum nested size of arrays to - /// create an ORC Batch with the appropriate size - size_t getColumnSize(const IColumn & column, DataTypePtr & type); - size_t getMaxColumnSize(Chunk & chunk); - void prepareWriter(); const FormatSettings format_settings; diff --git a/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp b/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp index 3578401a0f8..b43c195f201 100644 --- a/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp +++ b/src/Processors/Formats/Impl/PrometheusTextOutputFormat.cpp @@ -286,10 +286,10 @@ static void columnMapToContainer(const ColumnMap * col_map, size_t row_num, Cont { Field field; col_map->get(row_num, field); - const auto & map_field = field.get(); + const auto & map_field = field.safeGet(); for (const auto & map_element : map_field) { - const auto & map_entry = map_element.get(); + const auto & map_entry = map_element.safeGet(); String entry_key; String entry_value; diff --git a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp index de34a8aa04f..9839f64b947 100644 --- a/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp +++ b/src/Processors/Formats/Impl/ValuesBlockInputFormat.cpp @@ -333,7 +333,7 @@ namespace { const DataTypeTuple & type_tuple = static_cast(data_type); - Tuple & tuple_value = value.get(); + Tuple & tuple_value = value.safeGet(); size_t src_tuple_size = tuple_value.size(); size_t dst_tuple_size = type_tuple.getElements().size(); @@ -360,7 +360,7 @@ namespace if (element_type.isNullable()) return; - Array & array_value = value.get(); + Array & array_value = value.safeGet(); size_t array_value_size = array_value.size(); for (size_t i = 0; i < array_value_size; ++i) @@ -378,12 +378,12 @@ namespace const auto & key_type = *type_map.getKeyType(); const auto & value_type = *type_map.getValueType(); - auto & map = value.get(); + auto & map = value.safeGet(); size_t map_size = map.size(); for (size_t i = 0; i < map_size; ++i) { - auto & map_entry = map[i].get(); + auto & map_entry = map[i].safeGet(); auto & entry_key = map_entry[0]; auto & entry_value = map_entry[1]; diff --git a/src/Processors/Merges/AggregatingSortedTransform.h b/src/Processors/Merges/AggregatingSortedTransform.h index c6d7e844c65..c96ad3db525 100644 --- a/src/Processors/Merges/AggregatingSortedTransform.h +++ b/src/Processors/Merges/AggregatingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event AggregatingSortedMilliseconds; +} + namespace DB { @@ -29,6 +34,11 @@ public: } String getName() const override { return "AggregatingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::AggregatingSortedMilliseconds, "Aggregated sorted", getLogger("AggregatingSortedTransform")); + } }; 
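AggregatingSortedTransform above is the first of several transforms in this patch (Collapsing, Replacing, Summing, VersionedCollapsing, MergingSorted, ColumnGatherer) that gain a ProfileEvents counter plus an onFinish() override calling the shared logMergedStats() helper added to IMergingTransform.h further down. The helper increments the milliseconds counter and logs throughput for large merges; a standalone sketch of the numbers it derives (names are stand-ins, not the real helper):

#include <cstdint>
#include <cstdio>

struct MergedStats { uint64_t bytes = 0; uint64_t rows = 0; uint64_t blocks = 0; };

void reportMerge(const MergedStats & stats, uint64_t merging_elapsed_ns)
{
    if (stats.rows < 1000000)         /// the helper skips small merges (< 1M rows)
        return;

    double seconds = static_cast<double>(merging_elapsed_ns) / 1e9;
    if (seconds == 0.0)
        std::printf("%llu blocks, %llu rows, %llu bytes in 0 sec.\n",
                    (unsigned long long)stats.blocks, (unsigned long long)stats.rows,
                    (unsigned long long)stats.bytes);
    else
        std::printf("%llu blocks, %llu rows, %llu bytes in %.3f sec., %.0f rows/sec., %.0f bytes/sec.\n",
                    (unsigned long long)stats.blocks, (unsigned long long)stats.rows,
                    (unsigned long long)stats.bytes, seconds,
                    stats.rows / seconds, stats.bytes / seconds);
}

int main()
{
    reportMerge({.bytes = 512'000'000, .rows = 8'000'000, .blocks = 100}, 2'000'000'000);  /// 2 seconds
}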
} diff --git a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h index 53c103e7038..908994e1851 100644 --- a/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/AggregatingSortedAlgorithm.h @@ -30,6 +30,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } + /// Stores information for aggregation of SimpleAggregateFunction columns struct SimpleAggregateDescription { diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp index 86675bcb237..477566d8a94 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.cpp @@ -126,6 +126,9 @@ IMergingAlgorithm::Status FinishAggregatingInOrderAlgorithm::merge() Chunk FinishAggregatingInOrderAlgorithm::prepareToMerge() { + total_merged_rows += accumulated_rows; + total_merged_bytes += accumulated_bytes; + accumulated_rows = 0; accumulated_bytes = 0; diff --git a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h index cc6578e79be..c34028b1cba 100644 --- a/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h +++ b/src/Processors/Merges/Algorithms/FinishAggregatingInOrderAlgorithm.h @@ -50,6 +50,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return {.bytes = accumulated_bytes, .rows = accumulated_rows, .blocks = chunk_num}; } + private: Chunk prepareToMerge(); void addToAggregation(); @@ -92,6 +94,9 @@ private: UInt64 chunk_num = 0; size_t accumulated_rows = 0; size_t accumulated_bytes = 0; + + size_t total_merged_rows = 0; + size_t total_merged_bytes = 0; }; } diff --git a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h index aaa3859efb6..cb2775c968d 100644 --- a/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/GraphiteRollupSortedAlgorithm.h @@ -33,6 +33,8 @@ public: const char * getName() const override { return "GraphiteRollupSortedAlgorithm"; } Status merge() override; + MergedStats getMergedStats() const override { return merged_data->getMergedStats(); } + struct ColumnsDefinition { size_t path_column_num; diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithm.h b/src/Processors/Merges/Algorithms/IMergingAlgorithm.h index 9a1c7c24270..83f11232b71 100644 --- a/src/Processors/Merges/Algorithms/IMergingAlgorithm.h +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithm.h @@ -1,7 +1,7 @@ #pragma once #include -#include +#include namespace DB { @@ -65,6 +65,15 @@ public: IMergingAlgorithm() = default; virtual ~IMergingAlgorithm() = default; + + struct MergedStats + { + UInt64 bytes = 0; + UInt64 rows = 0; + UInt64 blocks = 0; + }; + + virtual MergedStats getMergedStats() const = 0; }; // TODO: use when compile with clang which could support it diff --git a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h index bc1aafe93f7..1725108ac5d 100644 --- 
a/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h +++ b/src/Processors/Merges/Algorithms/IMergingAlgorithmWithSharedChunks.h @@ -16,6 +16,8 @@ public: void initialize(Inputs inputs) override; void consume(Input & input, size_t source_num) override; + MergedStats getMergedStats() const override { return merged_data->getMergedStats(); } + private: Block header; SortDescription description; diff --git a/src/Processors/Merges/Algorithms/MergedData.h b/src/Processors/Merges/Algorithms/MergedData.h index c5bb074bb0c..8f47f89d8ee 100644 --- a/src/Processors/Merges/Algorithms/MergedData.h +++ b/src/Processors/Merges/Algorithms/MergedData.h @@ -183,6 +183,8 @@ public: UInt64 totalAllocatedBytes() const { return total_allocated_bytes; } UInt64 maxBlockSize() const { return max_block_size; } + IMergingAlgorithm::MergedStats getMergedStats() const { return {.bytes = total_allocated_bytes, .rows = total_merged_rows, .blocks = total_chunks}; } + virtual ~MergedData() = default; protected: diff --git a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h index bcb111baadf..c889668a38e 100644 --- a/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/MergingSortedAlgorithm.h @@ -31,7 +31,7 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; - const MergedData & getMergedData() const { return merged_data; } + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } private: Block header; diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp index e2c6371c44f..80c00f91d82 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.cpp @@ -127,14 +127,14 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc, Row right(left.size()); for (size_t col_num : desc.key_col_nums) - right[col_num] = (*raw_columns[col_num])[row_number].template get(); + right[col_num] = (*raw_columns[col_num])[row_number].template safeGet(); for (size_t col_num : desc.val_col_nums) - right[col_num] = (*raw_columns[col_num])[row_number].template get(); + right[col_num] = (*raw_columns[col_num])[row_number].template safeGet(); auto at_ith_column_jth_row = [&](const Row & matrix, size_t i, size_t j) -> const Field & { - return matrix[i].get()[j]; + return matrix[i].safeGet()[j]; }; auto tuple_of_nth_columns_at_jth_row = [&](const Row & matrix, const ColumnNumbers & col_nums, size_t j) -> Array @@ -160,7 +160,7 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc, auto merge = [&](const Row & matrix) { - size_t rows = matrix[desc.key_col_nums[0]].get().size(); + size_t rows = matrix[desc.key_col_nums[0]].safeGet().size(); for (size_t j = 0; j < rows; ++j) { @@ -190,10 +190,10 @@ static bool mergeMap(const SummingSortedAlgorithm::MapDescription & desc, for (const auto & key_value : merged) { for (size_t col_num_index = 0, size = desc.key_col_nums.size(); col_num_index < size; ++col_num_index) - row[desc.key_col_nums[col_num_index]].get()[row_num] = key_value.first[col_num_index]; + row[desc.key_col_nums[col_num_index]].safeGet()[row_num] = key_value.first[col_num_index]; for (size_t col_num_index = 0, size = desc.val_col_nums.size(); col_num_index < size; ++col_num_index) - row[desc.val_col_nums[col_num_index]].get()[row_num] = 
key_value.second[col_num_index]; + row[desc.val_col_nums[col_num_index]].safeGet()[row_num] = key_value.second[col_num_index]; ++row_num; } diff --git a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h index 664b171c4b9..74b4e397831 100644 --- a/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h +++ b/src/Processors/Merges/Algorithms/SummingSortedAlgorithm.h @@ -30,6 +30,8 @@ public: void consume(Input & input, size_t source_num) override; Status merge() override; + MergedStats getMergedStats() const override { return merged_data.getMergedStats(); } + struct AggregateDescription; struct MapDescription; diff --git a/src/Processors/Merges/CollapsingSortedTransform.h b/src/Processors/Merges/CollapsingSortedTransform.h index 4479ac82f66..99fb700abf1 100644 --- a/src/Processors/Merges/CollapsingSortedTransform.h +++ b/src/Processors/Merges/CollapsingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event CollapsingSortedMilliseconds; +} + namespace DB { @@ -36,6 +41,11 @@ public: } String getName() const override { return "CollapsingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::CollapsingSortedMilliseconds, "Collapsed sorted", getLogger("CollapsingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/IMergingTransform.h b/src/Processors/Merges/IMergingTransform.h index be629271736..e5cd3bdde46 100644 --- a/src/Processors/Merges/IMergingTransform.h +++ b/src/Processors/Merges/IMergingTransform.h @@ -2,7 +2,10 @@ #include #include +#include #include +#include +#include namespace DB { @@ -110,6 +113,8 @@ public: void work() override { + Stopwatch watch{CLOCK_MONOTONIC_COARSE}; + if (!state.init_chunks.empty()) algorithm.initialize(std::move(state.init_chunks)); @@ -147,6 +152,8 @@ public: // std::cerr << "Finished" << std::endl; state.is_finished = true; } + + merging_elapsed_ns += watch.elapsedNanoseconds(); } protected: @@ -156,7 +163,33 @@ protected: Algorithm algorithm; /// Profile info. 
- Stopwatch total_stopwatch {CLOCK_MONOTONIC_COARSE}; + UInt64 merging_elapsed_ns = 0; + + void logMergedStats(ProfileEvents::Event elapsed_ms_event, std::string_view transform_message, LoggerPtr log) const + { + auto stats = algorithm.getMergedStats(); + + UInt64 elapsed_ms = merging_elapsed_ns / 1000000LL; + ProfileEvents::increment(elapsed_ms_event, elapsed_ms); + + /// Don't print info for small parts (< 1M rows) + if (stats.rows < 1000000) + return; + + double seconds = static_cast(merging_elapsed_ns) / 1000000000ULL; + + if (seconds == 0.0) + { + LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in 0 sec.", + transform_message, stats.blocks, stats.rows, stats.bytes); + } + else + { + LOG_DEBUG(log, "{}, {} blocks, {} rows, {} bytes in {} sec., {} rows/sec., {}/sec.", + transform_message, stats.blocks, stats.rows, stats.bytes, + seconds, stats.rows / seconds, ReadableSize(stats.bytes / seconds)); + } + } private: using IMergingTransformBase::state; diff --git a/src/Processors/Merges/MergingSortedTransform.cpp b/src/Processors/Merges/MergingSortedTransform.cpp index 338b1ff7935..d2895a2a2e9 100644 --- a/src/Processors/Merges/MergingSortedTransform.cpp +++ b/src/Processors/Merges/MergingSortedTransform.cpp @@ -1,9 +1,12 @@ #include #include #include - #include -#include + +namespace ProfileEvents +{ + extern const Event MergingSortedMilliseconds; +} namespace DB { @@ -18,7 +21,6 @@ MergingSortedTransform::MergingSortedTransform( UInt64 limit_, bool always_read_till_end_, WriteBuffer * out_row_sources_buf_, - bool quiet_, bool use_average_block_sizes, bool have_all_inputs_) : IMergingTransform( @@ -37,7 +39,6 @@ MergingSortedTransform::MergingSortedTransform( limit_, out_row_sources_buf_, use_average_block_sizes) - , quiet(quiet_) { } @@ -48,22 +49,7 @@ void MergingSortedTransform::onNewInput() void MergingSortedTransform::onFinish() { - if (quiet) - return; - - const auto & merged_data = algorithm.getMergedData(); - - auto log = getLogger("MergingSortedTransform"); - - double seconds = total_stopwatch.elapsedSeconds(); - - if (seconds == 0.0) - LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in 0 sec.", merged_data.totalChunks(), merged_data.totalMergedRows()); - else - LOG_DEBUG(log, "Merge sorted {} blocks, {} rows in {} sec., {} rows/sec., {}/sec", - merged_data.totalChunks(), merged_data.totalMergedRows(), seconds, - merged_data.totalMergedRows() / seconds, - ReadableSize(merged_data.totalAllocatedBytes() / seconds)); + logMergedStats(ProfileEvents::MergingSortedMilliseconds, "Merged sorted", getLogger("MergingSortedTransform")); } } diff --git a/src/Processors/Merges/MergingSortedTransform.h b/src/Processors/Merges/MergingSortedTransform.h index 2b53939f309..6e52450efa7 100644 --- a/src/Processors/Merges/MergingSortedTransform.h +++ b/src/Processors/Merges/MergingSortedTransform.h @@ -21,7 +21,6 @@ public: UInt64 limit_ = 0, bool always_read_till_end_ = false, WriteBuffer * out_row_sources_buf_ = nullptr, - bool quiet_ = false, bool use_average_block_sizes = false, bool have_all_inputs_ = true); @@ -30,9 +29,6 @@ public: protected: void onNewInput() override; void onFinish() override; - -private: - bool quiet = false; }; } diff --git a/src/Processors/Merges/ReplacingSortedTransform.h b/src/Processors/Merges/ReplacingSortedTransform.h index 2657987f161..dc262aab9ee 100644 --- a/src/Processors/Merges/ReplacingSortedTransform.h +++ b/src/Processors/Merges/ReplacingSortedTransform.h @@ -3,6 +3,10 @@ #include #include +namespace ProfileEvents +{ + extern const Event 
ReplacingSortedMilliseconds; +} namespace DB { @@ -38,6 +42,11 @@ public: } String getName() const override { return "ReplacingSorted"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::ReplacingSortedMilliseconds, "Replaced sorted", getLogger("ReplacingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/SummingSortedTransform.h b/src/Processors/Merges/SummingSortedTransform.h index 70ddebfea95..d7c20223d7e 100644 --- a/src/Processors/Merges/SummingSortedTransform.h +++ b/src/Processors/Merges/SummingSortedTransform.h @@ -3,6 +3,11 @@ #include #include +namespace ProfileEvents +{ + extern const Event SummingSortedMilliseconds; +} + namespace DB { @@ -33,6 +38,11 @@ public: } String getName() const override { return "SummingSortedTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::SummingSortedMilliseconds, "Summed sorted", getLogger("SummingSortedTransform")); + } }; } diff --git a/src/Processors/Merges/VersionedCollapsingTransform.h b/src/Processors/Merges/VersionedCollapsingTransform.h index 18244469bd7..32b5d7bf343 100644 --- a/src/Processors/Merges/VersionedCollapsingTransform.h +++ b/src/Processors/Merges/VersionedCollapsingTransform.h @@ -3,6 +3,10 @@ #include #include +namespace ProfileEvents +{ + extern const Event VersionedCollapsingSortedMilliseconds; +} namespace DB { @@ -33,6 +37,11 @@ public: } String getName() const override { return "VersionedCollapsingTransform"; } + + void onFinish() override + { + logMergedStats(ProfileEvents::VersionedCollapsingSortedMilliseconds, "Versioned collapsed sorted", getLogger("VersionedCollapsingTransform")); + } }; } diff --git a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp index 52d1931c51e..b31ee7ea53c 100644 --- a/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp +++ b/src/Processors/QueryPlan/Optimizations/optimizeUseAggregateProjection.cpp @@ -255,20 +255,13 @@ static void appendAggregateFunctions( const auto * node = input; - if (node->result_name != aggregate.column_name) - { - if (DataTypeAggregateFunction::strictEquals(type, node->result_type)) - { - node = &proj_dag.addAlias(*node, aggregate.column_name); - } - else - { - /// Cast to aggregate types specified in query if it's not - /// strictly the same as the one specified in projection. This - /// is required to generate correct results during finalization. - node = &proj_dag.addCast(*node, type, aggregate.column_name); - } - } + if (!DataTypeAggregateFunction::strictEquals(type, node->result_type)) + /// Cast to aggregate types specified in query if it's not + /// strictly the same as the one specified in projection. This + /// is required to generate correct results during finalization. 
+ node = &proj_dag.addCast(*node, type, aggregate.column_name); + else if (node->result_name != aggregate.column_name) + node = &proj_dag.addAlias(*node, aggregate.column_name); proj_dag_outputs.push_back(node); } diff --git a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp index 7cac7bee6ec..f0094f0f8d2 100644 --- a/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp +++ b/src/Processors/QueryPlan/Optimizations/removeRedundantSorting.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -59,9 +60,10 @@ public: if (typeid_cast(current_step) || typeid_cast(current_step) /// (1) if there are LIMITs on top of ORDER BY, the ORDER BY is non-removable - || typeid_cast(current_step) /// (2) if ORDER BY is with FILL WITH, it is non-removable - || typeid_cast(current_step) /// (3) ORDER BY will change order of previous sorting - || typeid_cast(current_step)) /// (4) aggregation change order + || typeid_cast(current_step) /// (2) OFFSET on top of ORDER BY, the ORDER BY is non-removable + || typeid_cast(current_step) /// (3) if ORDER BY is with FILL WITH, it is non-removable + || typeid_cast(current_step) /// (4) ORDER BY will change order of previous sorting + || typeid_cast(current_step)) /// (5) aggregation change order { logStep("nodes_affect_order/push", current_node); nodes_affect_order.push_back(current_node); diff --git a/src/Processors/QueryPlan/ReadFromMergeTree.cpp b/src/Processors/QueryPlan/ReadFromMergeTree.cpp index 901d7c61167..348019d7d10 100644 --- a/src/Processors/QueryPlan/ReadFromMergeTree.cpp +++ b/src/Processors/QueryPlan/ReadFromMergeTree.cpp @@ -24,8 +24,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -52,6 +52,8 @@ #include #include +#include "config.h" + using namespace DB; namespace @@ -1474,16 +1476,14 @@ static void buildIndexes( else { MergeTreeIndexConditionPtr condition; - if (index_helper->isVectorSearch()) + if (index_helper->isVectorSimilarityIndex()) { -#ifdef ENABLE_ANNOY - if (const auto * annoy = typeid_cast(index_helper.get())) - condition = annoy->createIndexCondition(query_info, context); -#endif -#ifdef ENABLE_USEARCH - if (const auto * usearch = typeid_cast(index_helper.get())) - condition = usearch->createIndexCondition(query_info, context); +#if USE_USEARCH + if (const auto * vector_similarity_index = typeid_cast(index_helper.get())) + condition = vector_similarity_index->createIndexCondition(query_info, context); #endif + if (const auto * legacy_vector_similarity_index = typeid_cast(index_helper.get())) + condition = legacy_vector_similarity_index->createIndexCondition(query_info, context); if (!condition) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown vector search index {}", index_helper->index.name); } diff --git a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp index 2080e29ceba..596d08845e1 100644 --- a/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp +++ b/src/Processors/QueryPlan/ReadFromSystemNumbersStep.cpp @@ -119,23 +119,23 @@ using RangesWithStep = std::vector; std::optional steppedRangeFromRange(const Range & r, UInt64 step, UInt64 remainder) { - if ((r.right.get() == 0) && (!r.right_included)) + if ((r.right.safeGet() == 0) && (!r.right_included)) return std::nullopt; - UInt64 begin = (r.left.get() / step) * step; + UInt64 begin = (r.left.safeGet() / step) * step; if (begin > 
std::numeric_limits::max() - remainder) return std::nullopt; begin += remainder; - while ((r.left_included <= r.left.get()) && (begin <= r.left.get() - r.left_included)) + while ((r.left_included <= r.left.safeGet()) && (begin <= r.left.safeGet() - r.left_included)) { if (std::numeric_limits::max() - step < begin) return std::nullopt; begin += step; } - if ((begin >= r.right_included) && (begin - r.right_included >= r.right.get())) + if ((begin >= r.right_included) && (begin - r.right_included >= r.right.safeGet())) return std::nullopt; - UInt64 right_edge_included = r.right.get() - (1 - r.right_included); + UInt64 right_edge_included = r.right.safeGet() - (1 - r.right_included); return std::optional{RangeWithStep{begin, step, static_cast(right_edge_included - begin) / step + 1}}; } diff --git a/src/Processors/Sources/MySQLSource.cpp b/src/Processors/Sources/MySQLSource.cpp index 5d533a7747e..52be9a6e84a 100644 --- a/src/Processors/Sources/MySQLSource.cpp +++ b/src/Processors/Sources/MySQLSource.cpp @@ -219,11 +219,11 @@ namespace read_bytes_size += 8; break; case ValueType::vtEnum8: - assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).get()); + assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).safeGet()); read_bytes_size += assert_cast(column).byteSize(); break; case ValueType::vtEnum16: - assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).get()); + assert_cast(column).insertValue(assert_cast &>(data_type).castToValue(value.data()).safeGet()); read_bytes_size += assert_cast(column).byteSize(); break; case ValueType::vtString: diff --git a/src/Processors/Sources/PostgreSQLSource.cpp b/src/Processors/Sources/PostgreSQLSource.cpp index a3d6fd691d8..b9bda46bd10 100644 --- a/src/Processors/Sources/PostgreSQLSource.cpp +++ b/src/Processors/Sources/PostgreSQLSource.cpp @@ -35,9 +35,9 @@ PostgreSQLSource::PostgreSQLSource( const Block & sample_block, UInt64 max_block_size_) : ISource(sample_block.cloneEmpty()) - , query_str(query_str_) , max_block_size(max_block_size_) , connection_holder(std::move(connection_holder_)) + , query_str(query_str_) { init(sample_block); } @@ -51,10 +51,10 @@ PostgreSQLSource::PostgreSQLSource( UInt64 max_block_size_, bool auto_commit_) : ISource(sample_block.cloneEmpty()) - , query_str(query_str_) - , tx(std::move(tx_)) , max_block_size(max_block_size_) , auto_commit(auto_commit_) + , query_str(query_str_) + , tx(std::move(tx_)) { init(sample_block); } @@ -204,15 +204,15 @@ PostgreSQLSource::~PostgreSQLSource() */ stream->close(); } - - stream.reset(); - tx.reset(); } catch (...) { tryLogCurrentException(__PRETTY_FUNCTION__); } + stream.reset(); + tx.reset(); + if (connection_holder) connection_holder->setBroken(); } diff --git a/src/Processors/Sources/PostgreSQLSource.h b/src/Processors/Sources/PostgreSQLSource.h index 8a648ae8bb5..319c5d8d7c2 100644 --- a/src/Processors/Sources/PostgreSQLSource.h +++ b/src/Processors/Sources/PostgreSQLSource.h @@ -38,14 +38,12 @@ protected: UInt64 max_block_size_, bool auto_commit_); - String query_str; - std::shared_ptr tx; - std::unique_ptr stream; - Status prepare() override; - void onStart(); Chunk generate() override; + + void onStart(); + void onFinish(); private: @@ -61,6 +59,12 @@ private: postgres::ConnectionHolderPtr connection_holder; std::unordered_map array_info; + +protected: + String query_str; + /// tx and stream must be destroyed before connection_holder. 
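The PostgreSQLSource hunks above move query_str, tx and stream below connection_holder (and reset tx/stream explicitly even on the exception path) because C++ destroys non-static data members in reverse order of declaration, so members declared later are guaranteed to be destroyed before connection_holder. A minimal standalone demonstration of that rule:

#include <iostream>

struct Tracer
{
    const char * name;
    explicit Tracer(const char * name_) : name(name_) {}
    ~Tracer() { std::cout << "destroying " << name << '\n'; }
};

struct Source
{
    Tracer connection_holder{"connection_holder"};   /// declared first -> destroyed last
    Tracer tx{"tx"};                                  /// declared after -> destroyed before it
    Tracer stream{"stream"};                          /// declared last  -> destroyed first
};

int main()
{
    Source s;
    /// prints: destroying stream, destroying tx, destroying connection_holder
}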
+ std::shared_ptr tx; + std::unique_ptr stream; }; diff --git a/src/Processors/Sources/ShellCommandSource.cpp b/src/Processors/Sources/ShellCommandSource.cpp index 55eaf67eb3b..f55a3713215 100644 --- a/src/Processors/Sources/ShellCommandSource.cpp +++ b/src/Processors/Sources/ShellCommandSource.cpp @@ -8,13 +8,15 @@ #include #include -#include -#include -#include -#include #include +#include +#include +#include +#include + #include +#include namespace DB { @@ -68,11 +70,17 @@ static void makeFdBlocking(int fd) static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_milliseconds) { + auto logger = getLogger("TimeoutReadBufferFromFileDescriptor"); + auto describe_fd = [](const auto & pollfd) { return fmt::format("(fd={}, flags={})", pollfd.fd, fcntl(pollfd.fd, F_GETFL)); }; + int res; while (true) { Stopwatch watch; + + LOG_TEST(logger, "Polling descriptors: {}", fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", ")); + res = poll(pfds, static_cast(num), static_cast(timeout_milliseconds)); if (res < 0) @@ -82,7 +90,10 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond const auto elapsed = watch.elapsedMilliseconds(); if (timeout_milliseconds <= elapsed) + { + LOG_TEST(logger, "Timeout exceeded: elapsed={}, timeout={}", elapsed, timeout_milliseconds); break; + } timeout_milliseconds -= elapsed; } else @@ -91,6 +102,12 @@ static int pollWithTimeout(pollfd * pfds, size_t num, size_t timeout_millisecond } } + LOG_TEST( + logger, + "Poll for descriptors: {} returned {}", + fmt::join(std::span(pfds, pfds + num) | std::views::transform(describe_fd), ", "), + res); + return res; } @@ -200,12 +217,6 @@ public: return true; } - void reset() const - { - makeFdBlocking(stdout_fd); - makeFdBlocking(stderr_fd); - } - ~TimeoutReadBufferFromFileDescriptor() override { tryMakeFdBlocking(stdout_fd); diff --git a/src/Processors/Transforms/ColumnGathererTransform.cpp b/src/Processors/Transforms/ColumnGathererTransform.cpp index 15f8355bdc7..52fa42fdb51 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.cpp +++ b/src/Processors/Transforms/ColumnGathererTransform.cpp @@ -1,11 +1,15 @@ #include +#include #include #include #include #include #include -#include +namespace ProfileEvents +{ + extern const Event GatheringColumnMilliseconds; +} namespace DB { @@ -33,6 +37,13 @@ ColumnGathererStream::ColumnGathererStream( throw Exception(ErrorCodes::EMPTY_DATA_PASSED, "There are no streams to gather"); } +void ColumnGathererStream::updateStats(const IColumn & column) +{ + merged_rows += column.size(); + merged_bytes += column.allocatedBytes(); + ++merged_blocks; +} + void ColumnGathererStream::initialize(Inputs inputs) { Columns source_columns; @@ -82,7 +93,9 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() { res.addColumn(source_to_fully_copy->column); } - merged_rows += source_to_fully_copy->size; + + updateStats(*source_to_fully_copy->column); + source_to_fully_copy->pos = source_to_fully_copy->size; source_to_fully_copy = nullptr; return Status(std::move(res)); @@ -96,8 +109,7 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() { next_required_source = 0; Chunk res; - merged_rows += sources.front().column->size(); - merged_bytes += sources.front().column->allocatedBytes(); + updateStats(*sources.front().column); res.addColumn(std::move(sources.front().column)); sources.front().pos = sources.front().size = 0; return Status(std::move(res)); @@ -123,8 +135,8 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() if 
(source_to_fully_copy && result_column->empty()) { Chunk res; - merged_rows += source_to_fully_copy->column->size(); - merged_bytes += source_to_fully_copy->column->allocatedBytes(); + updateStats(*source_to_fully_copy->column); + if (result_column->hasDynamicStructure()) { auto col = result_column->cloneEmpty(); @@ -140,13 +152,13 @@ IMergingAlgorithm::Status ColumnGathererStream::merge() return Status(std::move(res)); } - auto col = result_column->cloneEmpty(); - result_column.swap(col); + auto return_column = result_column->cloneEmpty(); + result_column.swap(return_column); Chunk res; - merged_rows += col->size(); - merged_bytes += col->allocatedBytes(); - res.addColumn(std::move(col)); + updateStats(*return_column); + + res.addColumn(std::move(return_column)); return Status(std::move(res), row_sources_buf.eof() && !source_to_fully_copy); } @@ -185,31 +197,10 @@ ColumnGathererTransform::ColumnGathererTransform( toString(header.columns())); } -void ColumnGathererTransform::work() -{ - Stopwatch stopwatch; - IMergingTransform::work(); - elapsed_ns += stopwatch.elapsedNanoseconds(); -} - void ColumnGathererTransform::onFinish() { - auto merged_rows = algorithm.getMergedRows(); - auto merged_bytes = algorithm.getMergedRows(); - /// Don't print info for small parts (< 10M rows) - if (merged_rows < 10000000) - return; - - double seconds = static_cast(elapsed_ns) / 1000000000ULL; const auto & column_name = getOutputPort().getHeader().getByPosition(0).name; - - if (seconds == 0.0) - LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in 0 sec.", - column_name, static_cast(merged_bytes) / merged_rows); - else - LOG_DEBUG(log, "Gathered column {} ({} bytes/elem.) in {} sec., {} rows/sec., {}/sec.", - column_name, static_cast(merged_bytes) / merged_rows, seconds, - merged_rows / seconds, ReadableSize(merged_bytes / seconds)); + logMergedStats(ProfileEvents::GatheringColumnMilliseconds, fmt::format("Gathered column {}", column_name), log); } } diff --git a/src/Processors/Transforms/ColumnGathererTransform.h b/src/Processors/Transforms/ColumnGathererTransform.h index ec5691316ce..fbc9a6bfcc6 100644 --- a/src/Processors/Transforms/ColumnGathererTransform.h +++ b/src/Processors/Transforms/ColumnGathererTransform.h @@ -72,10 +72,11 @@ public: template void gather(Column & column_res); - UInt64 getMergedRows() const { return merged_rows; } - UInt64 getMergedBytes() const { return merged_bytes; } + MergedStats getMergedStats() const override { return {.bytes = merged_bytes, .rows = merged_rows, .blocks = merged_blocks}; } private: + void updateStats(const IColumn & column); + /// Cache required fields struct Source { @@ -105,6 +106,7 @@ private: ssize_t next_required_source = -1; UInt64 merged_rows = 0; UInt64 merged_bytes = 0; + UInt64 merged_blocks = 0; }; class ColumnGathererTransform final : public IMergingTransform @@ -120,12 +122,8 @@ public: String getName() const override { return "ColumnGathererTransform"; } - void work() override; - protected: void onFinish() override; - UInt64 elapsed_ns = 0; - LoggerPtr log; }; diff --git a/src/Processors/Transforms/FillingTransform.cpp b/src/Processors/Transforms/FillingTransform.cpp index 9601f821cc8..95f4a674ebb 100644 --- a/src/Processors/Transforms/FillingTransform.cpp +++ b/src/Processors/Transforms/FillingTransform.cpp @@ -62,7 +62,7 @@ static FillColumnDescription::StepFunction getStepFunction( case IntervalKind::Kind::NAME: \ return [step, scale, &date_lut](Field & field) { \ field = Add##NAME##sImpl::execute(static_cast(\ - field.get()), 
static_cast(step), date_lut, utc_time_zone, scale); }; + field.safeGet()), static_cast(step), date_lut, utc_time_zone, scale); }; FOR_EACH_INTERVAL_KIND(DECLARE_CASE) #undef DECLARE_CASE @@ -139,21 +139,21 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & { if (which.isDate() || which.isDate32()) { - Int64 avg_seconds = descr.fill_step.get() * descr.step_kind->toAvgSeconds(); + Int64 avg_seconds = descr.fill_step.safeGet() * descr.step_kind->toAvgSeconds(); if (std::abs(avg_seconds) < 86400) throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION, "Value of step is to low ({} seconds). Must be >= 1 day", std::abs(avg_seconds)); } if (which.isDate()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.get(), DateLUT::instance()); + descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); else if (which.isDate32()) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.get(), DateLUT::instance()); + descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), DateLUT::instance()); else if (const auto * date_time = checkAndGetDataType(type.get())) - descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.get(), date_time->getTimeZone()); + descr.step_func = getStepFunction(*descr.step_kind, descr.fill_step.safeGet(), date_time->getTimeZone()); else if (const auto * date_time64 = checkAndGetDataType(type.get())) { - const auto & step_dec = descr.fill_step.get &>(); + const auto & step_dec = descr.fill_step.safeGet &>(); Int64 step = DecimalUtils::convertTo(step_dec.getValue(), step_dec.getScale()); static const DateLUTImpl & utc_time_zone = DateLUT::instance("UTC"); @@ -163,7 +163,7 @@ static bool tryConvertFields(FillColumnDescription & descr, const DataTypePtr & case IntervalKind::Kind::NAME: \ descr.step_func = [step, &time_zone = date_time64->getTimeZone()](Field & field) \ { \ - auto field_decimal = field.get>(); \ + auto field_decimal = field.safeGet>(); \ auto res = Add##NAME##sImpl::execute(field_decimal.getValue(), step, time_zone, utc_time_zone, field_decimal.getScale()); \ field = DecimalField(res, field_decimal.getScale()); \ }; \ diff --git a/src/Processors/Transforms/MergeJoinTransform.cpp b/src/Processors/Transforms/MergeJoinTransform.cpp index e96a75d277b..6abfa0fccd0 100644 --- a/src/Processors/Transforms/MergeJoinTransform.cpp +++ b/src/Processors/Transforms/MergeJoinTransform.cpp @@ -511,6 +511,16 @@ void MergeJoinAlgorithm::logElapsed(double seconds) stat.max_blocks_loaded); } +IMergingAlgorithm::MergedStats MergeJoinAlgorithm::getMergedStats() const +{ + return + { + .bytes = stat.num_bytes[0] + stat.num_bytes[1], + .rows = stat.num_rows[0] + stat.num_rows[1], + .blocks = stat.num_blocks[0] + stat.num_blocks[1], + }; +} + static void prepareChunk(Chunk & chunk) { if (!chunk) @@ -547,6 +557,7 @@ void MergeJoinAlgorithm::consume(Input & input, size_t source_num) { stat.num_blocks[source_num] += 1; stat.num_rows[source_num] += input.chunk.getNumRows(); + stat.num_bytes[source_num] += input.chunk.allocatedBytes(); } prepareChunk(input.chunk); @@ -1271,7 +1282,7 @@ MergeJoinTransform::MergeJoinTransform( void MergeJoinTransform::onFinish() { - algorithm.logElapsed(total_stopwatch.elapsedSeconds()); + algorithm.logElapsed(static_cast(merging_elapsed_ns) / 1000000000ULL); } } diff --git a/src/Processors/Transforms/MergeJoinTransform.h b/src/Processors/Transforms/MergeJoinTransform.h index d37a0b9f3ae..8f74974af0f 100644 --- 
a/src/Processors/Transforms/MergeJoinTransform.h +++ b/src/Processors/Transforms/MergeJoinTransform.h @@ -245,6 +245,8 @@ public: void setAsofInequality(ASOFJoinInequality asof_inequality_); void logElapsed(double seconds); + MergedStats getMergedStats() const override; + private: std::optional handleAnyJoinState(); Status anyJoin(); @@ -280,6 +282,7 @@ private: { size_t num_blocks[2] = {0, 0}; size_t num_rows[2] = {0, 0}; + size_t num_bytes[2] = {0, 0}; size_t max_blocks_loaded = 0; }; diff --git a/src/Processors/Transforms/MergeSortingTransform.cpp b/src/Processors/Transforms/MergeSortingTransform.cpp index ede13b29219..c45192e7118 100644 --- a/src/Processors/Transforms/MergeSortingTransform.cpp +++ b/src/Processors/Transforms/MergeSortingTransform.cpp @@ -185,7 +185,6 @@ void MergeSortingTransform::consume(Chunk chunk) if (!external_merging_sorted) { - bool quiet = false; bool have_all_inputs = false; bool use_average_block_sizes = false; @@ -199,7 +198,6 @@ void MergeSortingTransform::consume(Chunk chunk) limit, /*always_read_till_end_=*/ false, nullptr, - quiet, use_average_block_sizes, have_all_inputs); diff --git a/src/Processors/Transforms/PasteJoinTransform.cpp b/src/Processors/Transforms/PasteJoinTransform.cpp index d2fa7eed256..982a347a70f 100644 --- a/src/Processors/Transforms/PasteJoinTransform.cpp +++ b/src/Processors/Transforms/PasteJoinTransform.cpp @@ -58,6 +58,16 @@ static void prepareChunk(Chunk & chunk) chunk.setColumns(std::move(columns), num_rows); } +IMergingAlgorithm::MergedStats PasteJoinAlgorithm::getMergedStats() const +{ + return + { + .bytes = stat.num_bytes[0] + stat.num_bytes[1], + .rows = stat.num_rows[0] + stat.num_rows[1], + .blocks = stat.num_blocks[0] + stat.num_blocks[1], + }; +} + void PasteJoinAlgorithm::initialize(Inputs inputs) { if (inputs.size() != 2) diff --git a/src/Processors/Transforms/PasteJoinTransform.h b/src/Processors/Transforms/PasteJoinTransform.h index 6a7e65ee27c..c184f20362d 100644 --- a/src/Processors/Transforms/PasteJoinTransform.h +++ b/src/Processors/Transforms/PasteJoinTransform.h @@ -35,8 +35,7 @@ public: void initialize(Inputs inputs) override; void consume(Input & input, size_t source_num) override; Status merge() override; - - void logElapsed(double seconds); + MergedStats getMergedStats() const override; private: Chunk createBlockWithDefaults(size_t source_num); @@ -55,6 +54,7 @@ private: { size_t num_blocks[2] = {0, 0}; size_t num_rows[2] = {0, 0}; + size_t num_bytes[2] = {0, 0}; size_t max_blocks_loaded = 0; }; diff --git a/src/Processors/Transforms/WindowTransform.cpp b/src/Processors/Transforms/WindowTransform.cpp index c26cd7cc8c3..bd11aa4cd28 100644 --- a/src/Processors/Transforms/WindowTransform.cpp +++ b/src/Processors/Transforms/WindowTransform.cpp @@ -85,7 +85,7 @@ static int compareValuesWithOffset(const IColumn * _compared_column, using ValueType = typename ColumnType::ValueType; // Note that the storage type of offset returned by get<> is different, so // we need to specify the type explicitly. 
- const ValueType offset = static_cast(_offset.get()); + const ValueType offset = static_cast(_offset.safeGet()); assert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); @@ -140,7 +140,7 @@ static int compareValuesWithOffsetFloat(const IColumn * _compared_column, _compared_column); const auto * reference_column = assert_cast( _reference_column); - const auto offset = _offset.get(); + const auto offset = _offset.safeGet(); chassert(offset >= 0); const auto compared_value_data = compared_column->getDataAt(compared_row); @@ -609,7 +609,7 @@ void WindowTransform::advanceFrameStartRowsOffset() { // Just recalculate it each time by walking blocks. const auto [moved_row, offset_left] = moveRowNumber(current_row, - window_description.frame.begin_offset.get() + window_description.frame.begin_offset.safeGet() * (window_description.frame.begin_preceding ? -1 : 1)); frame_start = moved_row; @@ -848,7 +848,7 @@ void WindowTransform::advanceFrameEndRowsOffset() // Walk the specified offset from the current row. The "+1" is needed // because the frame_end is a past-the-end pointer. const auto [moved_row, offset_left] = moveRowNumber(current_row, - window_description.frame.end_offset.get() + window_description.frame.end_offset.safeGet() * (window_description.frame.end_preceding ? -1 : 1) + 1); @@ -1157,8 +1157,7 @@ void WindowTransform::appendChunk(Chunk & chunk) // Initialize output columns. for (auto & ws : workspaces) { - if (ws.window_function_impl) - block.casted_columns.push_back(ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices)); + block.casted_columns.push_back(ws.window_function_impl ? ws.window_function_impl->castColumn(block.input_columns, ws.argument_column_indices) : nullptr); block.output_columns.push_back(ws.aggregate_function->getResultType() ->createColumn()); @@ -2105,13 +2104,13 @@ namespace throw Exception(ErrorCodes::BAD_ARGUMENTS, "Argument of 'ntile' function must be a constant"); auto type_id = argument_types[0]->getTypeId(); if (type_id == TypeIndex::UInt8) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); else if (type_id == TypeIndex::UInt16) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); else if (type_id == TypeIndex::UInt32) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); else if (type_id == TypeIndex::UInt64) - buckets = arg_col[transform->current_row.row].get(); + buckets = arg_col[transform->current_row.row].safeGet(); if (!buckets) { @@ -2337,22 +2336,9 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction argument_types[2]->getName()); } - const auto from_name = argument_types[2]->getName(); - const auto to_name = argument_types[0]->getName(); - ColumnsWithTypeAndName arguments + auto get_cast_func = [from = argument_types[2], to = argument_types[0]] { - { argument_types[2], "" }, - { - DataTypeString().createColumnConst(0, to_name), - std::make_shared(), - "" - } - }; - - auto get_cast_func = [&arguments] - { - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::accurate, {}); - return func_builder_cast->build(arguments); + return createInternalCast({from, {}}, to, CastType::accurate, {}); }; func_cast = get_cast_func(); @@ -2402,7 +2388,7 @@ struct WindowFunctionLagLeadInFrame final : public WindowFunction { offset = (*current_block.input_columns[ 
workspace.argument_column_indices[1]])[ - transform->current_row.row].get(); + transform->current_row.row].safeGet(); /// Either overflow or really negative value, both is not acceptable. if (offset < 0) @@ -2488,7 +2474,7 @@ struct WindowFunctionNthValue final : public WindowFunction Int64 offset = (*current_block.input_columns[ workspace.argument_column_indices[1]])[ - transform->current_row.row].get(); + transform->current_row.row].safeGet(); /// Either overflow or really negative value, both is not acceptable. if (offset <= 0) diff --git a/src/Processors/tests/gtest_full_sorting_join.cpp b/src/Processors/tests/gtest_full_sorting_join.cpp index f678d7984e8..befe5e28b5d 100644 --- a/src/Processors/tests/gtest_full_sorting_join.cpp +++ b/src/Processors/tests/gtest_full_sorting_join.cpp @@ -208,6 +208,12 @@ Block executePipeline(QueryPipeline && pipeline) template void assertColumnVectorEq(const typename ColumnVector::Container & expected, const Block & block, const std::string & name) { + if (expected.empty()) + { + ASSERT_TRUE(block.columns() == 0); + return; + } + const auto * actual = typeid_cast *>(block.getByName(name).column.get()); ASSERT_TRUE(actual) << "unexpected column type: " << block.getByName(name).column->dumpStructure() << "expected: " << typeid(ColumnVector).name(); @@ -230,6 +236,12 @@ void assertColumnVectorEq(const typename ColumnVector::Container & expected, template void assertColumnEq(const IColumn & expected, const Block & block, const std::string & name) { + if (expected.empty()) + { + ASSERT_TRUE(block.columns() == 0); + return; + } + const ColumnPtr & actual = block.getByName(name).column; ASSERT_TRUE(checkColumn(*actual)); ASSERT_TRUE(checkColumn(expected)); diff --git a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp index bc22f249f97..f41a447049c 100644 --- a/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp +++ b/src/QueryPipeline/tests/gtest_blocks_size_merging_streams.cpp @@ -83,7 +83,7 @@ TEST(MergingSortedTest, SimpleBlockSizeTest) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true); + 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true); pipe.addTransform(std::move(transform)); @@ -125,7 +125,7 @@ TEST(MergingSortedTest, MoreInterestingBlockSizes) EXPECT_EQ(pipe.numOutputPorts(), 3); auto transform = std::make_shared(pipe.getHeader(), pipe.numOutputPorts(), sort_description, - 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, false, true); + 8192, /*max_block_size_bytes=*/0, SortingQueueStrategy::Batch, 0, false, nullptr, true); pipe.addTransform(std::move(transform)); diff --git a/src/Server/HTTP/HTTPServerResponse.h b/src/Server/HTTP/HTTPServerResponse.h index ac4f52e7766..51f5814556d 100644 --- a/src/Server/HTTP/HTTPServerResponse.h +++ b/src/Server/HTTP/HTTPServerResponse.h @@ -248,6 +248,8 @@ public: void attachRequest(HTTPServerRequest * request_) { request = request_; } + const Poco::Net::HTTPServerSession & getSession() const { return session; } + private: Poco::Net::HTTPServerSession & session; HTTPServerRequest * request = nullptr; diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp index e2098b284bf..2fcb66ae606 100644 --- 
a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp @@ -30,7 +30,7 @@ void WriteBufferFromHTTPServerResponse::startSendHeaders() if (add_cors_header) response.set("Access-Control-Allow-Origin", "*"); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); std::stringstream header; //STYLE_CHECK_ALLOW_STD_STRING_STREAM response.beginWrite(header); @@ -119,12 +119,10 @@ void WriteBufferFromHTTPServerResponse::nextImpl() WriteBufferFromHTTPServerResponse::WriteBufferFromHTTPServerResponse( HTTPServerResponse & response_, bool is_http_method_head_, - UInt64 keep_alive_timeout_, const ProfileEvents::Event & write_event_) : HTTPWriteBuffer(response_.getSocket(), write_event_) , response(response_) , is_http_method_head(is_http_method_head_) - , keep_alive_timeout(keep_alive_timeout_) { } diff --git a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h index a3952b7c553..f0c80f24582 100644 --- a/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h +++ b/src/Server/HTTP/WriteBufferFromHTTPServerResponse.h @@ -29,7 +29,6 @@ public: WriteBufferFromHTTPServerResponse( HTTPServerResponse & response_, bool is_http_method_head_, - UInt64 keep_alive_timeout_, const ProfileEvents::Event & write_event_ = ProfileEvents::end()); ~WriteBufferFromHTTPServerResponse() override; @@ -91,7 +90,6 @@ private: bool is_http_method_head; bool add_cors_header = false; - size_t keep_alive_timeout = 0; bool initialized = false; diff --git a/src/Server/HTTP/sendExceptionToHTTPClient.cpp b/src/Server/HTTP/sendExceptionToHTTPClient.cpp index 321c4866e8f..07a649dc396 100644 --- a/src/Server/HTTP/sendExceptionToHTTPClient.cpp +++ b/src/Server/HTTP/sendExceptionToHTTPClient.cpp @@ -29,7 +29,7 @@ void sendExceptionToHTTPClient( if (!out) { /// If nothing was sent yet. - WriteBufferFromHTTPServerResponse out_for_message{response, request.getMethod() == HTTPRequest::HTTP_HEAD, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT}; + WriteBufferFromHTTPServerResponse out_for_message{response, request.getMethod() == HTTPRequest::HTTP_HEAD}; out_for_message.writeln(exception_message); out_for_message.finalize(); diff --git a/src/Server/HTTPHandler.cpp b/src/Server/HTTPHandler.cpp index c8a58527f2c..d2bc22e98cc 100644 --- a/src/Server/HTTPHandler.cpp +++ b/src/Server/HTTPHandler.cpp @@ -266,7 +266,6 @@ void HTTPHandler::processQuery( std::make_shared( response, request.getMethod() == HTTPRequest::HTTP_HEAD, - context->getServerSettings().keep_alive_timeout.totalSeconds(), write_event); used_output.out = used_output.out_holder; used_output.out_maybe_compressed = used_output.out_holder; @@ -558,7 +557,7 @@ try if (!used_output.out_holder && !used_output.exception_is_written) { /// If nothing was sent yet and we don't even know if we must compress the response. 
- WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD, DEFAULT_HTTP_KEEP_ALIVE_TIMEOUT).writeln(s); + WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD).writeln(s); } else if (used_output.out_maybe_compressed) { diff --git a/src/Server/HTTPHandlerFactory.cpp b/src/Server/HTTPHandlerFactory.cpp index 2e7f16f59d7..fc31ad2874e 100644 --- a/src/Server/HTTPHandlerFactory.cpp +++ b/src/Server/HTTPHandlerFactory.cpp @@ -122,7 +122,8 @@ static inline auto createHandlersFactoryFromConfig( } else if (handler_type == "prometheus") { - main_handler_factory->addHandler(createPrometheusHandlerFactoryForHTTPRule(server, config, prefix + "." + key, async_metrics)); + main_handler_factory->addHandler( + createPrometheusHandlerFactoryForHTTPRule(server, config, prefix + "." + key, async_metrics)); } else if (handler_type == "replicas_status") { diff --git a/src/Server/InterserverIOHTTPHandler.cpp b/src/Server/InterserverIOHTTPHandler.cpp index e46021c8e68..59852c79139 100644 --- a/src/Server/InterserverIOHTTPHandler.cpp +++ b/src/Server/InterserverIOHTTPHandler.cpp @@ -87,9 +87,8 @@ void InterserverIOHTTPHandler::handleRequest(HTTPServerRequest & request, HTTPSe response.setChunkedTransferEncoding(true); Output used_output; - const auto keep_alive_timeout = server.context()->getServerSettings().keep_alive_timeout.totalSeconds(); used_output.out = std::make_shared( - response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, keep_alive_timeout, write_event); + response, request.getMethod() == Poco::Net::HTTPRequest::HTTP_HEAD, write_event); auto finalize_output = [&] { diff --git a/src/Server/PrometheusRequestHandler.cpp b/src/Server/PrometheusRequestHandler.cpp index bbd9a978c91..ae1fb6d629e 100644 --- a/src/Server/PrometheusRequestHandler.cpp +++ b/src/Server/PrometheusRequestHandler.cpp @@ -95,7 +95,7 @@ public: class PrometheusRequestHandler::ImplWithContext : public Impl { public: - explicit ImplWithContext(PrometheusRequestHandler & parent) : Impl(parent), default_settings(parent.server.context()->getSettingsRef()) { } + explicit ImplWithContext(PrometheusRequestHandler & parent) : Impl(parent), default_settings(server().context()->getSettingsRef()) { } virtual void handlingRequestWithContext(HTTPServerRequest & request, HTTPServerResponse & response) = 0; @@ -353,7 +353,7 @@ void PrometheusRequestHandler::handleRequest(HTTPServerRequest & request, HTTPSe if (request.getVersion() == HTTPServerRequest::HTTP_1_1) response.setChunkedTransferEncoding(true); - setResponseDefaultHeaders(response, config.keep_alive_timeout); + setResponseDefaultHeaders(response); impl->beforeHandlingRequest(request); impl->handleRequest(request, response); @@ -379,7 +379,7 @@ WriteBufferFromHTTPServerResponse & PrometheusRequestHandler::getOutputStream(HT if (write_buffer_from_response) return *write_buffer_from_response; write_buffer_from_response = std::make_unique( - response, http_method == HTTPRequest::HTTP_HEAD, config.keep_alive_timeout, write_event); + response, http_method == HTTPRequest::HTTP_HEAD, write_event); return *write_buffer_from_response; } @@ -399,7 +399,7 @@ void PrometheusRequestHandler::finalizeResponse(HTTPServerResponse & response) if (write_buffer_from_response) std::exchange(write_buffer_from_response, {})->finalize(); else - WriteBufferFromHTTPServerResponse{response, http_method == HTTPRequest::HTTP_HEAD, config.keep_alive_timeout, write_event}.finalize(); + WriteBufferFromHTTPServerResponse{response, 
http_method == HTTPRequest::HTTP_HEAD, write_event}.finalize(); } chassert(response_finalized && !write_buffer_from_response); } diff --git a/src/Server/PrometheusRequestHandler.h b/src/Server/PrometheusRequestHandler.h index 3a2dac0b7ad..281ecf5260e 100644 --- a/src/Server/PrometheusRequestHandler.h +++ b/src/Server/PrometheusRequestHandler.h @@ -15,8 +15,11 @@ class WriteBufferFromHTTPServerResponse; class PrometheusRequestHandler : public HTTPRequestHandler { public: - PrometheusRequestHandler(IServer & server_, const PrometheusRequestHandlerConfig & config_, - const AsynchronousMetrics & async_metrics_, std::shared_ptr metrics_writer_); + PrometheusRequestHandler( + IServer & server_, + const PrometheusRequestHandlerConfig & config_, + const AsynchronousMetrics & async_metrics_, + std::shared_ptr metrics_writer_); ~PrometheusRequestHandler() override; void handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & write_event_) override; diff --git a/src/Server/ReplicasStatusHandler.cpp b/src/Server/ReplicasStatusHandler.cpp index f43357db0a8..419ad635d0d 100644 --- a/src/Server/ReplicasStatusHandler.cpp +++ b/src/Server/ReplicasStatusHandler.cpp @@ -89,8 +89,7 @@ void ReplicasStatusHandler::handleRequest(HTTPServerRequest & request, HTTPServe } } - const auto & server_settings = getContext()->getServerSettings(); - setResponseDefaultHeaders(response, server_settings.keep_alive_timeout.totalSeconds()); + setResponseDefaultHeaders(response); if (!ok) { diff --git a/src/Server/StaticRequestHandler.cpp b/src/Server/StaticRequestHandler.cpp index f3981dea9fb..d8c0765bca4 100644 --- a/src/Server/StaticRequestHandler.cpp +++ b/src/Server/StaticRequestHandler.cpp @@ -35,10 +35,9 @@ namespace ErrorCodes extern const int INVALID_CONFIG_PARAMETER; } -static inline std::unique_ptr -responseWriteBuffer(HTTPServerRequest & request, HTTPServerResponse & response, UInt64 keep_alive_timeout) +static inline std::unique_ptr responseWriteBuffer(HTTPServerRequest & request, HTTPServerResponse & response) { - auto buf = std::unique_ptr(new WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD, keep_alive_timeout)); + auto buf = std::unique_ptr(new WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD)); /// The client can pass a HTTP header indicating supported compression method (gzip or deflate). 
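The Accept-Encoding comment above is where responseWriteBuffer() decides whether to compress the static response. As a standalone illustration of that idea only - the enum, function name and preference order are made up here, and the real ClickHouse selection supports more codecs than gzip and deflate - a naive chooser could look like this:

    #include <string>

    enum class ExampleCompression { None, Gzip, Deflate };

    /// Hypothetical helper: pick a response compression from the raw Accept-Encoding header value.
    ExampleCompression chooseExampleCompression(const std::string & accept_encoding)
    {
        if (accept_encoding.find("gzip") != std::string::npos)
            return ExampleCompression::Gzip;
        if (accept_encoding.find("deflate") != std::string::npos)
            return ExampleCompression::Deflate;
        return ExampleCompression::None;
    }
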
String http_response_compression_methods = request.get("Accept-Encoding", ""); @@ -91,8 +90,7 @@ static inline void trySendExceptionToClient( void StaticRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event & /*write_event*/) { - auto keep_alive_timeout = server.context()->getServerSettings().keep_alive_timeout.totalSeconds(); - auto out = responseWriteBuffer(request, response, keep_alive_timeout); + auto out = responseWriteBuffer(request, response); try { @@ -107,7 +105,7 @@ void StaticRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServer "The Transfer-Encoding is not chunked and there " "is no Content-Length header for POST request"); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); response.setStatusAndReason(Poco::Net::HTTPResponse::HTTPStatus(status)); writeResponse(*out); } } diff --git a/src/Server/TCPHandler.cpp b/src/Server/TCPHandler.cpp index 679f72b85ff..2b9a7295198 100644 --- a/src/Server/TCPHandler.cpp +++ b/src/Server/TCPHandler.cpp @@ -103,6 +103,7 @@ namespace DB::ErrorCodes extern const int SUPPORT_IS_DISABLED; extern const int UNSUPPORTED_METHOD; extern const int USER_EXPIRED; + extern const int NETWORK_ERROR; } namespace @@ -254,8 +255,8 @@ void TCPHandler::runImpl() socket().setSendTimeout(send_timeout); socket().setNoDelay(true); - in = std::make_shared(socket(), read_event); - out = std::make_shared(socket(), write_event); + in = std::make_shared(socket(), read_event); + out = std::make_shared(socket(), write_event); /// Support for PROXY protocol if (parse_proxy_protocol && !receiveProxyHeader()) @@ -280,6 +281,48 @@ void TCPHandler::runImpl() if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM) receiveAddendum(); + { + /// Server side of chunked protocol negotiation. + /// The server advertises its protocol capabilities (separately for the send and receive channels) by sending + /// in its 'Hello' response one of four types - chunked, notchunked, chunked_optional, notchunked_optional. + /// Non-optional types are strict, meaning that the server supports only this type; optional means that the + /// server prefers this type but is able to work with the opposite one. + /// The client selects which type it is going to use for communication based on the settings from the config or arguments, + /// and sends either a "chunked" or "notchunked" protocol request in the addendum section of the handshake. + /// The client can detect that the server's protocol capabilities are incompatible with its settings (for example, + /// the server strictly requires the chunked protocol while the client's settings allow only notchunked) - in such a case + /// the client should terminate the connection. However, if the client continues with an incompatible protocol type request, the server + /// will send an appropriate exception and disconnect the client. + + auto is_chunked = [](const String & chunked_srv_str, const String & chunked_cl_str, const String & direction) + { + bool chunked_srv = chunked_srv_str.starts_with("chunked"); + bool optional_srv = chunked_srv_str.ends_with("_optional"); + bool chunked_cl = chunked_cl_str.starts_with("chunked"); + + if (optional_srv) + return chunked_cl; + + if (chunked_cl != chunked_srv) + throw NetException( + ErrorCodes::NETWORK_ERROR, + "Incompatible protocol: {} is {}, client requested {}", + direction, + chunked_srv ? "chunked" : "notchunked", + chunked_cl ?
"chunked" : "notchunked"); + + return chunked_srv; + }; + + bool out_chunked = is_chunked(server.config().getString("proto_caps.send", "notchunked"), proto_recv_chunked_cl, "send"); + bool in_chunked = is_chunked(server.config().getString("proto_caps.recv", "notchunked"), proto_send_chunked_cl, "recv"); + + if (out_chunked) + out->enableChunked(); + if (in_chunked) + in->enableChunked(); + } + if (!is_interserver_mode) { /// If session created, then settings in session context has been updated. @@ -321,7 +364,7 @@ void TCPHandler::runImpl() { Stopwatch idle_time; UInt64 timeout_ms = std::min(poll_interval, idle_connection_timeout) * 1000000; - while (tcp_server.isOpen() && !server.isCancelled() && !static_cast(*in).poll(timeout_ms)) + while (tcp_server.isOpen() && !server.isCancelled() && !in->poll(timeout_ms)) { if (idle_time.elapsedSeconds() > idle_connection_timeout) { @@ -796,7 +839,7 @@ bool TCPHandler::readDataNext() /// We are waiting for a packet from the client. Thus, every `POLL_INTERVAL` seconds check whether we need to shut down. while (true) { - if (static_cast(*in).poll(timeout_us)) + if (in->poll(timeout_us)) { /// If client disconnected. if (in->eof()) @@ -1186,6 +1229,8 @@ void TCPHandler::processTablesStatusRequest() } response.write(*out, client_tcp_protocol_version); + + out->finishChunk(); } void TCPHandler::receiveUnexpectedTablesStatusRequest() @@ -1206,6 +1251,8 @@ void TCPHandler::sendPartUUIDs() writeVarUInt(Protocol::Server::PartUUIDs, *out); writeVectorBinary(uuids, *out); + + out->finishChunk(); out->next(); } } @@ -1214,6 +1261,8 @@ void TCPHandler::sendPartUUIDs() void TCPHandler::sendReadTaskRequestAssumeLocked() { writeVarUInt(Protocol::Server::ReadTaskRequest, *out); + + out->finishChunk(); out->next(); } @@ -1222,6 +1271,8 @@ void TCPHandler::sendMergeTreeAllRangesAnnouncementAssumeLocked(InitialAllRanges { writeVarUInt(Protocol::Server::MergeTreeAllRangesAnnouncement, *out); announcement.serialize(*out); + + out->finishChunk(); out->next(); } @@ -1230,6 +1281,8 @@ void TCPHandler::sendMergeTreeReadTaskRequestAssumeLocked(ParallelReadRequest re { writeVarUInt(Protocol::Server::MergeTreeReadTaskRequest, *out); request.serialize(*out); + + out->finishChunk(); out->next(); } @@ -1238,6 +1291,8 @@ void TCPHandler::sendProfileInfo(const ProfileInfo & info) { writeVarUInt(Protocol::Server::ProfileInfo, *out); info.write(*out, client_tcp_protocol_version); + + out->finishChunk(); out->next(); } @@ -1253,6 +1308,8 @@ void TCPHandler::sendTotals(const Block & totals) state.block_out->write(totals); state.maybe_compressed_out->next(); + + out->finishChunk(); out->next(); } } @@ -1269,6 +1326,8 @@ void TCPHandler::sendExtremes(const Block & extremes) state.block_out->write(extremes); state.maybe_compressed_out->next(); + + out->finishChunk(); out->next(); } } @@ -1286,6 +1345,8 @@ void TCPHandler::sendProfileEvents() writeStringBinary("", *out); state.profile_events_block_out->write(block); + + out->finishChunk(); out->next(); auto elapsed_milliseconds = stopwatch.elapsedMilliseconds(); @@ -1323,6 +1384,8 @@ void TCPHandler::sendTimezone() LOG_DEBUG(log, "TCPHandler::sendTimezone(): {}", tz); writeVarUInt(Protocol::Server::TimezoneUpdate, *out); writeStringBinary(tz, *out); + + out->finishChunk(); out->next(); } @@ -1583,6 +1646,12 @@ void TCPHandler::receiveAddendum() if (!is_interserver_mode) session->setQuotaClientKey(quota_key); + + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + readStringBinary(proto_send_chunked_cl, 
*in); + readStringBinary(proto_recv_chunked_cl, *in); + } } @@ -1616,6 +1685,11 @@ void TCPHandler::sendHello() writeStringBinary(server_display_name, *out); if (client_tcp_protocol_version >= DBMS_MIN_REVISION_WITH_VERSION_PATCH) writeVarUInt(VERSION_PATCH, *out); + if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_CHUNKED_PACKETS) + { + writeStringBinary(server.config().getString("proto_caps.send", "notchunked"), *out); + writeStringBinary(server.config().getString("proto_caps.recv", "notchunked"), *out); + } if (client_tcp_protocol_version >= DBMS_MIN_PROTOCOL_VERSION_WITH_PASSWORD_COMPLEXITY_RULES) { auto rules = server.context()->getAccessControl().getPasswordComplexityRules(); @@ -1668,6 +1742,7 @@ bool TCPHandler::receivePacket() case Protocol::Client::Ping: writeVarUInt(Protocol::Server::Pong, *out); + out->finishChunk(); out->next(); return false; @@ -2197,7 +2272,7 @@ QueryState::CancellationStatus TCPHandler::getQueryCancellationStatus() after_check_cancelled.restart(); /// During request execution the only packet that can come from the client is stopping the query. - if (static_cast(*in).poll(0)) + if (in->poll(0)) { if (in->eof()) { @@ -2248,19 +2323,33 @@ void TCPHandler::sendData(const Block & block) } writeVarUInt(Protocol::Server::Data, *out); - /// Send external table name (empty name is the main table) - writeStringBinary("", *out); /// For testing hedged requests if (block.rows() > 0 && query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds()) { + /// This strange sequence is needed when the chunked protocol is enabled, so that the client does not + /// hang while waiting for at least the packet type - a chunk will not be processed unless either the chunk footer + /// or a chunk continuation header is received - the first 'next' sends the starting chunk containing the packet type + /// and the second 'next' sends the chunk continuation header. + out->next(); + /// Send external table name (empty name is the main table) + writeStringBinary("", *out); out->next(); std::chrono::milliseconds ms(query_context->getSettingsRef().sleep_in_send_data_ms.totalMilliseconds()); std::this_thread::sleep_for(ms); } + else + { + /// Send external table name (empty name is the main table) + writeStringBinary("", *out); + } state.block_out->write(block); - state.maybe_compressed_out->next(); + + if (state.maybe_compressed_out != out) + state.maybe_compressed_out->next(); + + out->finishChunk(); out->next(); } catch (...)
@@ -2296,6 +2385,8 @@ void TCPHandler::sendLogData(const Block & block) writeStringBinary("", *out); state.logs_block_out->write(block); + + out->finishChunk(); out->next(); } @@ -2307,6 +2398,7 @@ void TCPHandler::sendTableColumns(const ColumnsDescription & columns) writeStringBinary("", *out); writeStringBinary(columns.toString(), *out); + out->finishChunk(); out->next(); } @@ -2316,6 +2408,8 @@ void TCPHandler::sendException(const Exception & e, bool with_stack_trace) writeVarUInt(Protocol::Server::Exception, *out); writeException(e, *out, with_stack_trace); + + out->finishChunk(); out->next(); } @@ -2326,6 +2420,8 @@ void TCPHandler::sendEndOfStream() state.io.setAllDataSent(); writeVarUInt(Protocol::Server::EndOfStream, *out); + + out->finishChunk(); out->next(); } @@ -2344,6 +2440,8 @@ void TCPHandler::sendProgress() increment.elapsed_ns = current_elapsed_ns - state.prev_elapsed_ns; state.prev_elapsed_ns = current_elapsed_ns; increment.write(*out, client_tcp_protocol_version); + + out->finishChunk(); out->next(); } diff --git a/src/Server/TCPHandler.h b/src/Server/TCPHandler.h index 74afb5a14a5..dca40e98920 100644 --- a/src/Server/TCPHandler.h +++ b/src/Server/TCPHandler.h @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include "Core/Types.h" #include "IServer.h" @@ -186,6 +188,8 @@ private: UInt64 client_version_minor = 0; UInt64 client_version_patch = 0; UInt32 client_tcp_protocol_version = 0; + String proto_send_chunked_cl = "notchunked"; + String proto_recv_chunked_cl = "notchunked"; String quota_key; /// Connection settings, which are extracted from a context. @@ -204,8 +208,8 @@ private: ClientInfo::QueryKind query_kind = ClientInfo::QueryKind::NO_QUERY; /// Streams for reading/writing from/to client connection socket. 
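The two new TCPHandler fields above (proto_send_chunked_cl, proto_recv_chunked_cl) store the client's request from the handshake addendum, while the server's own capability comes from proto_caps.send / proto_caps.recv in the config. The outcome for every combination follows the is_chunked rule shown earlier in this patch; the small standalone program below only enumerates that decision table (the negotiate() helper and main() are illustrative, not ClickHouse code):

    #include <iostream>
    #include <string>

    /// Same rule as the is_chunked lambda in TCPHandler::runImpl(): an "_optional" server capability
    /// follows the client's request, a strict capability must match it or the connection is rejected.
    static const char * negotiate(const std::string & server_cap, const std::string & client_req)
    {
        bool chunked_srv = server_cap.starts_with("chunked");
        bool optional_srv = server_cap.ends_with("_optional");
        bool chunked_cl = client_req.starts_with("chunked");

        if (optional_srv)
            return chunked_cl ? "chunked" : "notchunked";
        if (chunked_cl != chunked_srv)
            return "error: incompatible protocol";
        return chunked_srv ? "chunked" : "notchunked";
    }

    int main()
    {
        for (const std::string server_cap : {"chunked", "chunked_optional", "notchunked", "notchunked_optional"})
            for (const std::string client_req : {"chunked", "notchunked"})
                std::cout << server_cap << " / " << client_req << " -> " << negotiate(server_cap, client_req) << '\n';
    }
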
- std::shared_ptr in; - std::shared_ptr out; + std::shared_ptr in; + std::shared_ptr out; ProfileEvents::Event read_event; ProfileEvents::Event write_event; diff --git a/src/Server/WebUIRequestHandler.cpp b/src/Server/WebUIRequestHandler.cpp index a3d098014e7..c04d7a3f2a0 100644 --- a/src/Server/WebUIRequestHandler.cpp +++ b/src/Server/WebUIRequestHandler.cpp @@ -30,23 +30,20 @@ DashboardWebUIRequestHandler::DashboardWebUIRequestHandler(IServer & server_) : BinaryWebUIRequestHandler::BinaryWebUIRequestHandler(IServer & server_) : server(server_) {} JavaScriptWebUIRequestHandler::JavaScriptWebUIRequestHandler(IServer & server_) : server(server_) {} -static void handle(const IServer & server, HTTPServerRequest & request, HTTPServerResponse & response, std::string_view html) +static void handle(HTTPServerRequest & request, HTTPServerResponse & response, std::string_view html) { - auto keep_alive_timeout = server.context()->getServerSettings().keep_alive_timeout.totalSeconds(); - response.setContentType("text/html; charset=UTF-8"); if (request.getVersion() == HTTPServerRequest::HTTP_1_1) response.setChunkedTransferEncoding(true); - setResponseDefaultHeaders(response, keep_alive_timeout); + setResponseDefaultHeaders(response); response.setStatusAndReason(Poco::Net::HTTPResponse::HTTP_OK); - WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD, keep_alive_timeout).write(html.data(), html.size()); - + WriteBufferFromHTTPServerResponse(response, request.getMethod() == HTTPRequest::HTTP_HEAD).write(html.data(), html.size()); } void PlayWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) { - handle(server, request, response, {reinterpret_cast(gresource_play_htmlData), gresource_play_htmlSize}); + handle(request, response, {reinterpret_cast(gresource_play_htmlData), gresource_play_htmlSize}); } void DashboardWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) @@ -64,23 +61,23 @@ void DashboardWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HT static re2::RE2 lz_string_url = R"(https://[^\s"'`]+lz-string[^\s"'`]*\.js)"; RE2::Replace(&html, lz_string_url, "/js/lz-string.js"); - handle(server, request, response, html); + handle(request, response, html); } void BinaryWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) { - handle(server, request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); + handle(request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); } void JavaScriptWebUIRequestHandler::handleRequest(HTTPServerRequest & request, HTTPServerResponse & response, const ProfileEvents::Event &) { if (request.getURI() == "/js/uplot.js") { - handle(server, request, response, {reinterpret_cast(gresource_uplot_jsData), gresource_uplot_jsSize}); + handle(request, response, {reinterpret_cast(gresource_uplot_jsData), gresource_uplot_jsSize}); } else if (request.getURI() == "/js/lz-string.js") { - handle(server, request, response, {reinterpret_cast(gresource_lz_string_jsData), gresource_lz_string_jsSize}); + handle(request, response, {reinterpret_cast(gresource_lz_string_jsData), gresource_lz_string_jsSize}); } else { @@ -88,7 +85,7 @@ void JavaScriptWebUIRequestHandler::handleRequest(HTTPServerRequest & request, H *response.send() << "Not found.\n"; } - handle(server, 
request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); + handle(request, response, {reinterpret_cast(gresource_binary_htmlData), gresource_binary_htmlSize}); } } diff --git a/src/Storages/AlterCommands.cpp b/src/Storages/AlterCommands.cpp index dfb388ffdb2..d5780e32db3 100644 --- a/src/Storages/AlterCommands.cpp +++ b/src/Storages/AlterCommands.cpp @@ -110,7 +110,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ if (ast_col_decl.comment) { const auto & ast_comment = typeid_cast(*ast_col_decl.comment); - command.comment = ast_comment.value.get(); + command.comment = ast_comment.value.safeGet(); } if (ast_col_decl.codec) @@ -168,7 +168,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ if (ast_col_decl.comment) { const auto & ast_comment = ast_col_decl.comment->as(); - command.comment.emplace(ast_comment.value.get()); + command.comment.emplace(ast_comment.value.safeGet()); } if (ast_col_decl.ttl) @@ -211,7 +211,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.type = COMMENT_COLUMN; command.column_name = getIdentifierName(command_ast->column); const auto & ast_comment = command_ast->comment->as(); - command.comment = ast_comment.value.get(); + command.comment = ast_comment.value.safeGet(); command.if_exists = command_ast->if_exists; return command; } @@ -221,7 +221,7 @@ std::optional AlterCommand::parse(const ASTAlterCommand * command_ command.ast = command_ast->clone(); command.type = COMMENT_TABLE; const auto & ast_comment = command_ast->comment->as(); - command.comment = ast_comment.value.get(); + command.comment = ast_comment.value.safeGet(); return command; } else if (command_ast->type == ASTAlterCommand::MODIFY_ORDER_BY) diff --git a/src/Storages/Cache/ExternalDataSourceCache.cpp b/src/Storages/Cache/ExternalDataSourceCache.cpp index cffb1dc9ca3..8c778fd511a 100644 --- a/src/Storages/Cache/ExternalDataSourceCache.cpp +++ b/src/Storages/Cache/ExternalDataSourceCache.cpp @@ -57,8 +57,15 @@ LocalFileHolder::~LocalFileHolder() { if (original_readbuffer) { - assert_cast(original_readbuffer.get())->seek(0, SEEK_SET); - file_cache_controller->value().startBackgroundDownload(std::move(original_readbuffer), *thread_pool); + try + { + assert_cast(original_readbuffer.get())->seek(0, SEEK_SET); + file_cache_controller->value().startBackgroundDownload(std::move(original_readbuffer), *thread_pool); + } + catch (...) 
+ { + tryLogCurrentException(getLogger("LocalFileHolder"), "Exception during destructor of LocalFileHolder."); + } } } diff --git a/src/Storages/ColumnsDescription.cpp b/src/Storages/ColumnsDescription.cpp index da749812167..0d724245b49 100644 --- a/src/Storages/ColumnsDescription.cpp +++ b/src/Storages/ColumnsDescription.cpp @@ -197,7 +197,7 @@ void ColumnDescription::readText(ReadBuffer & buf) } if (col_ast->comment) - comment = col_ast->comment->as().value.get(); + comment = col_ast->comment->as().value.safeGet(); if (col_ast->codec) codec = CompressionCodecFactory::instance().validateCodecAndGetPreprocessedAST(col_ast->codec, type, false, true, true, true); diff --git a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp index fdb4cfcb371..7616b384860 100644 --- a/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp +++ b/src/Storages/Distributed/DistributedAsyncInsertDirectoryQueue.cpp @@ -273,6 +273,8 @@ ConnectionPoolWithFailoverPtr DistributedAsyncInsertDirectoryQueue::createPool(c address.default_database, address.user, address.password, + address.proto_send_chunked, + address.proto_recv_chunked, address.quota_key, address.cluster, address.cluster_secret, diff --git a/src/Storages/Hive/StorageHive.cpp b/src/Storages/Hive/StorageHive.cpp index 089fb5c585c..ae2e8cffe28 100644 --- a/src/Storages/Hive/StorageHive.cpp +++ b/src/Storages/Hive/StorageHive.cpp @@ -445,7 +445,7 @@ StorageHive::StorageHive( storage_metadata.partition_key = KeyDescription::getKeyFromAST(partition_by_ast, storage_metadata.columns, getContext()); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), getContext())); } void StorageHive::lazyInitialize() diff --git a/src/Storages/IndicesDescription.cpp b/src/Storages/IndicesDescription.cpp index cef8fd85f97..753fbf1d635 100644 --- a/src/Storages/IndicesDescription.cpp +++ b/src/Storages/IndicesDescription.cpp @@ -3,6 +3,7 @@ #include #include +#include #include #include #include @@ -130,10 +131,15 @@ IndexDescription IndexDescription::getIndexFromAST(const ASTPtr & definition_ast { for (size_t i = 0; i < index_type->arguments->children.size(); ++i) { - const auto * argument = index_type->arguments->children[i]->as(); - if (!argument) + const auto & child = index_type->arguments->children[i]; + if (const auto * ast_literal = child->as(); ast_literal != nullptr) + /// E.g. INDEX index_name column_name TYPE vector_similarity('hnsw', 'f32') + result.arguments.emplace_back(ast_literal->value); + else if (const auto * ast_identifier = child->as(); ast_identifier != nullptr) + /// E.g. 
INDEX index_name column_name TYPE vector_similarity(hnsw, f32) + result.arguments.emplace_back(ast_identifier->name()); + else throw Exception(ErrorCodes::INCORRECT_QUERY, "Only literals can be skip index arguments"); - result.arguments.emplace_back(argument->value); } } diff --git a/src/Storages/MaterializedView/RefreshSet.cpp b/src/Storages/MaterializedView/RefreshSet.cpp index a3ef327dc24..7536f59c1e4 100644 --- a/src/Storages/MaterializedView/RefreshSet.cpp +++ b/src/Storages/MaterializedView/RefreshSet.cpp @@ -9,11 +9,6 @@ namespace CurrentMetrics namespace DB { -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; -} - RefreshSet::Handle::Handle(Handle && other) noexcept { *this = std::move(other); @@ -27,6 +22,7 @@ RefreshSet::Handle & RefreshSet::Handle::operator=(Handle && other) noexcept parent_set = std::exchange(other.parent_set, nullptr); id = std::move(other.id); dependencies = std::move(other.dependencies); + iter = std::move(other.iter); metric_increment = std::move(other.metric_increment); return *this; } @@ -39,21 +35,21 @@ RefreshSet::Handle::~Handle() void RefreshSet::Handle::rename(StorageID new_id) { std::lock_guard lock(parent_set->mutex); - parent_set->removeDependenciesLocked(id, dependencies); - auto it = parent_set->tasks.find(id); - auto task = it->second; - parent_set->tasks.erase(it); + RefreshTaskHolder task = *iter; + parent_set->removeDependenciesLocked(task, dependencies); + parent_set->removeTaskLocked(id, iter); id = new_id; - parent_set->tasks.emplace(id, task); - parent_set->addDependenciesLocked(id, dependencies); + iter = parent_set->addTaskLocked(id, task); + parent_set->addDependenciesLocked(task, dependencies); } void RefreshSet::Handle::changeDependencies(std::vector deps) { std::lock_guard lock(parent_set->mutex); - parent_set->removeDependenciesLocked(id, dependencies); + RefreshTaskHolder task = *iter; + parent_set->removeDependenciesLocked(task, dependencies); dependencies = std::move(deps); - parent_set->addDependenciesLocked(id, dependencies); + parent_set->addDependenciesLocked(task, dependencies); } void RefreshSet::Handle::reset() @@ -63,8 +59,8 @@ void RefreshSet::Handle::reset() { std::lock_guard lock(parent_set->mutex); - parent_set->removeDependenciesLocked(id, dependencies); - parent_set->tasks.erase(id); + parent_set->removeDependenciesLocked(*iter, dependencies); + parent_set->removeTaskLocked(id, iter); } parent_set = nullptr; @@ -76,37 +72,50 @@ RefreshSet::RefreshSet() = default; void RefreshSet::emplace(StorageID id, const std::vector & dependencies, RefreshTaskHolder task) { std::lock_guard guard(mutex); - auto [it, is_inserted] = tasks.emplace(id, task); - if (!is_inserted) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Refresh set entry already exists for table {}", id.getFullTableName()); - addDependenciesLocked(id, dependencies); + const auto iter = addTaskLocked(id, task); + addDependenciesLocked(task, dependencies); - task->setRefreshSetHandleUnlock(Handle(this, id, dependencies)); + task->setRefreshSetHandleUnlock(Handle(this, id, iter, dependencies)); } -void RefreshSet::addDependenciesLocked(const StorageID & id, const std::vector & dependencies) +RefreshTaskList::iterator RefreshSet::addTaskLocked(StorageID id, RefreshTaskHolder task) +{ + RefreshTaskList & list = tasks[id]; + list.push_back(task); + return std::prev(list.end()); +} + +void RefreshSet::removeTaskLocked(StorageID id, RefreshTaskList::iterator iter) +{ + const auto it = tasks.find(id); + it->second.erase(iter); + if (it->second.empty()) + 
tasks.erase(it); +} + +void RefreshSet::addDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies) { for (const StorageID & dep : dependencies) - dependents[dep].insert(id); + dependents[dep].insert(task); } -void RefreshSet::removeDependenciesLocked(const StorageID & id, const std::vector & dependencies) +void RefreshSet::removeDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies) { for (const StorageID & dep : dependencies) { auto & set = dependents[dep]; - set.erase(id); + set.erase(task); if (set.empty()) dependents.erase(dep); } } -RefreshTaskHolder RefreshSet::getTask(const StorageID & id) const +RefreshTaskList RefreshSet::findTasks(const StorageID & id) const { std::lock_guard lock(mutex); - if (auto task = tasks.find(id); task != tasks.end()) - return task->second; - return nullptr; + if (auto it = tasks.find(id); it != tasks.end()) + return it->second; + return {}; } RefreshSet::InfoContainer RefreshSet::getInfo() const @@ -116,26 +125,23 @@ RefreshSet::InfoContainer RefreshSet::getInfo() const lock.unlock(); InfoContainer res; - for (const auto & [id, task] : tasks_copy) - res.push_back(task->getInfo()); + for (const auto & [id, list] : tasks_copy) + for (const auto & task : list) + res.push_back(task->getInfo()); return res; } std::vector RefreshSet::getDependents(const StorageID & id) const { std::lock_guard lock(mutex); - std::vector res; auto it = dependents.find(id); if (it == dependents.end()) return {}; - for (const StorageID & dep_id : it->second) - if (auto task = tasks.find(dep_id); task != tasks.end()) - res.push_back(task->second); - return res; + return std::vector(it->second.begin(), it->second.end()); } -RefreshSet::Handle::Handle(RefreshSet * parent_set_, StorageID id_, std::vector dependencies_) +RefreshSet::Handle::Handle(RefreshSet * parent_set_, StorageID id_, RefreshTaskList::iterator iter_, std::vector dependencies_) : parent_set(parent_set_), id(std::move(id_)), dependencies(std::move(dependencies_)) - , metric_increment(CurrentMetrics::Increment(CurrentMetrics::RefreshableViews)) {} + , iter(iter_), metric_increment(CurrentMetrics::Increment(CurrentMetrics::RefreshableViews)) {} } diff --git a/src/Storages/MaterializedView/RefreshSet.h b/src/Storages/MaterializedView/RefreshSet.h index eff445023a6..7fb583fd316 100644 --- a/src/Storages/MaterializedView/RefreshSet.h +++ b/src/Storages/MaterializedView/RefreshSet.h @@ -5,12 +5,11 @@ #include #include #include +#include namespace DB { -using DatabaseAndTableNameSet = std::unordered_set; - enum class RefreshState : RefreshTaskStateUnderlying { Disabled = 0, @@ -46,8 +45,7 @@ struct RefreshInfo class RefreshSet { public: - /// RAII thing that unregisters a task and its dependencies in destructor. - /// Storage IDs must be unique. Not thread safe. + /// RAII thing that unregisters a task and its dependencies in destructor. Not thread safe. 
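Since getTask() is replaced by findTasks() above, call sites change from handling one possibly-null task to iterating a short list (normally one element, several only on a name collision). A hypothetical call-site sketch, assuming only the RefreshSet, RefreshTaskList and RefreshTaskHolder declarations from this patch - the function name, the view_id variable and the loop body are placeholders:

    /// Sketch of the new call-site shape; previously: if (auto task = set.getTask(view_id)) ...
    void forEachRefreshTaskExample(const DB::RefreshSet & set, const DB::StorageID & view_id)
    {
        for (const DB::RefreshTaskHolder & task : set.findTasks(view_id))
        {
            (void)task; /// apply here whatever previously targeted the single task
        }
    }
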
class Handle { friend class RefreshSet; @@ -73,9 +71,10 @@ public: RefreshSet * parent_set = nullptr; StorageID id = StorageID::createEmpty(); std::vector dependencies; + RefreshTaskList::iterator iter; // in parent_set->tasks[id] std::optional metric_increment; - Handle(RefreshSet * parent_set_, StorageID id_, std::vector dependencies_); + Handle(RefreshSet * parent_set_, StorageID id_, RefreshTaskList::iterator iter_, std::vector dependencies_); }; using InfoContainer = std::vector; @@ -84,7 +83,9 @@ public: void emplace(StorageID id, const std::vector & dependencies, RefreshTaskHolder task); - RefreshTaskHolder getTask(const StorageID & id) const; + /// Finds active refreshable view(s) by database and table name. + /// Normally there's at most one, but we allow name collisions here, just in case. + RefreshTaskList findTasks(const StorageID & id) const; InfoContainer getInfo() const; @@ -92,8 +93,8 @@ public: std::vector getDependents(const StorageID & id) const; private: - using TaskMap = std::unordered_map; - using DependentsMap = std::unordered_map; + using TaskMap = std::unordered_map; + using DependentsMap = std::unordered_map, StorageID::DatabaseAndTableNameHash, StorageID::DatabaseAndTableNameEqual>; /// Protects the two maps below, not locked for any nontrivial operations (e.g. operations that /// block or lock other mutexes). @@ -102,8 +103,10 @@ private: TaskMap tasks; DependentsMap dependents; - void addDependenciesLocked(const StorageID & id, const std::vector & dependencies); - void removeDependenciesLocked(const StorageID & id, const std::vector & dependencies); + RefreshTaskList::iterator addTaskLocked(StorageID id, RefreshTaskHolder task); + void removeTaskLocked(StorageID id, RefreshTaskList::iterator iter); + void addDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies); + void removeDependenciesLocked(RefreshTaskHolder task, const std::vector & dependencies); }; } diff --git a/src/Storages/MaterializedView/RefreshTask.cpp b/src/Storages/MaterializedView/RefreshTask.cpp index aa8f51d5295..0837eaf97fd 100644 --- a/src/Storages/MaterializedView/RefreshTask.cpp +++ b/src/Storages/MaterializedView/RefreshTask.cpp @@ -33,7 +33,6 @@ RefreshTask::RefreshTask( {} RefreshTaskHolder RefreshTask::create( - const StorageMaterializedView & view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy) { @@ -46,12 +45,9 @@ RefreshTaskHolder RefreshTask::create( t->refreshTask(); }); - std::vector deps; if (strategy.dependencies) for (auto && dependency : strategy.dependencies->children) - deps.emplace_back(dependency->as()); - - context->getRefreshSet().emplace(view.getStorageID(), deps, task); + task->initial_dependencies.emplace_back(dependency->as()); return task; } @@ -61,6 +57,7 @@ void RefreshTask::initializeAndStart(std::shared_ptr vi view_to_refresh = view; if (view->getContext()->getSettingsRef().stop_refreshable_materialized_views_on_startup) stop_requested = true; + view->getContext()->getRefreshSet().emplace(view->getStorageID(), initial_dependencies, shared_from_this()); populateDependencies(); advanceNextRefreshTime(currentTime()); refresh_task->schedule(); @@ -69,7 +66,8 @@ void RefreshTask::initializeAndStart(std::shared_ptr vi void RefreshTask::rename(StorageID new_id) { std::lock_guard guard(mutex); - set_handle.rename(new_id); + if (set_handle) + set_handle.rename(new_id); } void RefreshTask::alterRefreshParams(const DB::ASTRefreshStrategy & new_strategy) diff --git a/src/Storages/MaterializedView/RefreshTask.h 
b/src/Storages/MaterializedView/RefreshTask.h index 1f050a97cd9..623493f6aec 100644 --- a/src/Storages/MaterializedView/RefreshTask.h +++ b/src/Storages/MaterializedView/RefreshTask.h @@ -26,7 +26,6 @@ public: /// The only proper way to construct task static RefreshTaskHolder create( - const StorageMaterializedView & view, ContextMutablePtr context, const DB::ASTRefreshStrategy & strategy); @@ -84,9 +83,11 @@ private: RefreshSchedule refresh_schedule; RefreshSettings refresh_settings; // TODO: populate, use, update on alter + std::vector initial_dependencies; RefreshSet::Handle set_handle; /// StorageIDs of our dependencies that we're waiting for. + using DatabaseAndTableNameSet = std::unordered_set; DatabaseAndTableNameSet remaining_dependencies; bool time_arrived = false; diff --git a/src/Storages/MaterializedView/RefreshTask_fwd.h b/src/Storages/MaterializedView/RefreshTask_fwd.h index 1f366962eb6..9a0a122381e 100644 --- a/src/Storages/MaterializedView/RefreshTask_fwd.h +++ b/src/Storages/MaterializedView/RefreshTask_fwd.h @@ -11,5 +11,6 @@ class RefreshTask; using RefreshTaskStateUnderlying = UInt8; using RefreshTaskHolder = std::shared_ptr; using RefreshTaskObserver = std::weak_ptr; +using RefreshTaskList = std::list; } diff --git a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp b/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp deleted file mode 100644 index 7354243732c..00000000000 --- a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.cpp +++ /dev/null @@ -1,507 +0,0 @@ -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int LOGICAL_ERROR; - extern const int INCORRECT_QUERY; -} - -namespace -{ - -template -void extractReferenceVectorFromLiteral(ApproximateNearestNeighborInformation::Embedding & reference_vector, Literal literal) -{ - Float64 float_element_of_reference_vector; - Int64 int_element_of_reference_vector; - - for (const auto & value : literal.value()) - { - if (value.tryGet(float_element_of_reference_vector)) - reference_vector.emplace_back(float_element_of_reference_vector); - else if (value.tryGet(int_element_of_reference_vector)) - reference_vector.emplace_back(static_cast(int_element_of_reference_vector)); - else - throw Exception(ErrorCodes::INCORRECT_QUERY, "Wrong type of elements in reference vector. 
Only float or int are supported."); - } -} - -ApproximateNearestNeighborInformation::Metric stringToMetric(std::string_view metric) -{ - if (metric == "L2Distance") - return ApproximateNearestNeighborInformation::Metric::L2; - else if (metric == "LpDistance") - return ApproximateNearestNeighborInformation::Metric::Lp; - else - return ApproximateNearestNeighborInformation::Metric::Unknown; -} - -} - -ApproximateNearestNeighborCondition::ApproximateNearestNeighborCondition(const SelectQueryInfo & query_info, ContextPtr context) - : block_with_constants(KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context)) - , index_granularity(context->getMergeTreeSettings().index_granularity) - , max_limit_for_ann_queries(context->getSettingsRef().max_limit_for_ann_queries) - , index_is_useful(checkQueryStructure(query_info)) -{} - -bool ApproximateNearestNeighborCondition::alwaysUnknownOrTrue(String metric) const -{ - if (!index_is_useful) - return true; // Query isn't supported - // If query is supported, check metrics for match - return !(stringToMetric(metric) == query_information->metric); -} - -float ApproximateNearestNeighborCondition::getComparisonDistanceForWhereQuery() const -{ - if (index_is_useful && query_information.has_value() - && query_information->type == ApproximateNearestNeighborInformation::Type::Where) - return query_information->distance; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Not supported method for this query type"); -} - -UInt64 ApproximateNearestNeighborCondition::getLimit() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->limit; - throw Exception(ErrorCodes::LOGICAL_ERROR, "No LIMIT section in query, not supported"); -} - -std::vector ApproximateNearestNeighborCondition::getReferenceVector() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->reference_vector; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Reference vector was requested for useless or uninitialized index."); -} - -size_t ApproximateNearestNeighborCondition::getDimensions() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->reference_vector.size(); - throw Exception(ErrorCodes::LOGICAL_ERROR, "Number of dimensions was requested for useless or uninitialized index."); -} - -String ApproximateNearestNeighborCondition::getColumnName() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->column_name; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Column name was requested for useless or uninitialized index."); -} - -ApproximateNearestNeighborInformation::Metric ApproximateNearestNeighborCondition::getMetricType() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->metric; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Metric name was requested for useless or uninitialized index."); -} - -float ApproximateNearestNeighborCondition::getPValueForLpDistance() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->p_for_lp_dist; - throw Exception(ErrorCodes::LOGICAL_ERROR, "P from LPDistance was requested for useless or uninitialized index."); -} - -ApproximateNearestNeighborInformation::Type ApproximateNearestNeighborCondition::getQueryType() const -{ - if (index_is_useful && query_information.has_value()) - return query_information->type; - throw Exception(ErrorCodes::LOGICAL_ERROR, "Query type was requested for useless or 
uninitialized index."); -} - -bool ApproximateNearestNeighborCondition::checkQueryStructure(const SelectQueryInfo & query) -{ - /// RPN-s for different sections of the query - RPN rpn_prewhere_clause; - RPN rpn_where_clause; - RPN rpn_order_by_clause; - RPNElement rpn_limit; - UInt64 limit; - - ApproximateNearestNeighborInformation prewhere_info; - ApproximateNearestNeighborInformation where_info; - ApproximateNearestNeighborInformation order_by_info; - - /// Build rpns for query sections - const auto & select = query.query->as(); - - /// If query has PREWHERE clause - if (select.prewhere()) - traverseAST(select.prewhere(), rpn_prewhere_clause); - - /// If query has WHERE clause - if (select.where()) - traverseAST(select.where(), rpn_where_clause); - - /// If query has LIMIT clause - if (select.limitLength()) - traverseAtomAST(select.limitLength(), rpn_limit); - - if (select.orderBy()) // If query has ORDERBY clause - traverseOrderByAST(select.orderBy(), rpn_order_by_clause); - - /// Reverse RPNs for conveniences during parsing - std::reverse(rpn_prewhere_clause.begin(), rpn_prewhere_clause.end()); - std::reverse(rpn_where_clause.begin(), rpn_where_clause.end()); - std::reverse(rpn_order_by_clause.begin(), rpn_order_by_clause.end()); - - /// Match rpns with supported types and extract information - const bool prewhere_is_valid = matchRPNWhere(rpn_prewhere_clause, prewhere_info); - const bool where_is_valid = matchRPNWhere(rpn_where_clause, where_info); - const bool order_by_is_valid = matchRPNOrderBy(rpn_order_by_clause, order_by_info); - const bool limit_is_valid = matchRPNLimit(rpn_limit, limit); - - /// Query without a LIMIT clause or with a limit greater than a restriction is not supported - if (!limit_is_valid || max_limit_for_ann_queries < limit) - return false; - - /// Search type query in both sections isn't supported - if (prewhere_is_valid && where_is_valid) - return false; - - /// Search type should be in WHERE or PREWHERE clause - if (prewhere_is_valid || where_is_valid) - query_information = std::move(prewhere_is_valid ? 
prewhere_info : where_info); - - if (order_by_is_valid) - { - /// Query with valid where and order by type is not supported - if (query_information.has_value()) - return false; - - query_information = std::move(order_by_info); - } - - if (query_information) - query_information->limit = limit; - - return query_information.has_value(); -} - -void ApproximateNearestNeighborCondition::traverseAST(const ASTPtr & node, RPN & rpn) -{ - // If the node is ASTFunction, it may have children nodes - if (const auto * func = node->as()) - { - const ASTs & children = func->arguments->children; - // Traverse children nodes - for (const auto& child : children) - traverseAST(child, rpn); - } - - RPNElement element; - /// Get the data behind node - if (!traverseAtomAST(node, element)) - element.function = RPNElement::FUNCTION_UNKNOWN; - - rpn.emplace_back(std::move(element)); -} - -bool ApproximateNearestNeighborCondition::traverseAtomAST(const ASTPtr & node, RPNElement & out) -{ - /// Match Functions - if (const auto * function = node->as()) - { - /// Set the name - out.func_name = function->name; - - if (function->name == "L1Distance" || - function->name == "L2Distance" || - function->name == "LinfDistance" || - function->name == "cosineDistance" || - function->name == "dotProduct" || - function->name == "LpDistance") - out.function = RPNElement::FUNCTION_DISTANCE; - else if (function->name == "tuple") - out.function = RPNElement::FUNCTION_TUPLE; - else if (function->name == "array") - out.function = RPNElement::FUNCTION_ARRAY; - else if (function->name == "less" || - function->name == "greater" || - function->name == "lessOrEquals" || - function->name == "greaterOrEquals") - out.function = RPNElement::FUNCTION_COMPARISON; - else if (function->name == "_CAST") - out.function = RPNElement::FUNCTION_CAST; - else - return false; - - return true; - } - /// Match identifier - else if (const auto * identifier = node->as()) - { - out.function = RPNElement::FUNCTION_IDENTIFIER; - out.identifier.emplace(identifier->name()); - out.func_name = "column identifier"; - - return true; - } - - /// Check if we have constants behind the node - return tryCastToConstType(node, out); -} - -bool ApproximateNearestNeighborCondition::tryCastToConstType(const ASTPtr & node, RPNElement & out) -{ - Field const_value; - DataTypePtr const_type; - - if (KeyCondition::getConstant(node, block_with_constants, const_value, const_type)) - { - /// Check for constant types - if (const_value.getType() == Field::Types::Float64) - { - out.function = RPNElement::FUNCTION_FLOAT_LITERAL; - out.float_literal.emplace(const_value.get()); - out.func_name = "Float literal"; - return true; - } - - if (const_value.getType() == Field::Types::UInt64) - { - out.function = RPNElement::FUNCTION_INT_LITERAL; - out.int_literal.emplace(const_value.get()); - out.func_name = "Int literal"; - return true; - } - - if (const_value.getType() == Field::Types::Int64) - { - out.function = RPNElement::FUNCTION_INT_LITERAL; - out.int_literal.emplace(const_value.get()); - out.func_name = "Int literal"; - return true; - } - - if (const_value.getType() == Field::Types::Tuple) - { - out.function = RPNElement::FUNCTION_LITERAL_TUPLE; - out.tuple_literal = const_value.get(); - out.func_name = "Tuple literal"; - return true; - } - - if (const_value.getType() == Field::Types::Array) - { - out.function = RPNElement::FUNCTION_LITERAL_ARRAY; - out.array_literal = const_value.get(); - out.func_name = "Array literal"; - return true; - } - - if (const_value.getType() == 
Field::Types::String) - { - out.function = RPNElement::FUNCTION_STRING_LITERAL; - out.func_name = const_value.get(); - return true; - } - } - - return false; -} - -void ApproximateNearestNeighborCondition::traverseOrderByAST(const ASTPtr & node, RPN & rpn) -{ - if (const auto * expr_list = node->as()) - if (const auto * order_by_element = expr_list->children.front()->as()) - traverseAST(order_by_element->children.front(), rpn); -} - -/// Returns true and stores ApproximateNearestNeighborInformation if the query has valid WHERE clause -bool ApproximateNearestNeighborCondition::matchRPNWhere(RPN & rpn, ApproximateNearestNeighborInformation & ann_info) -{ - /// Fill query type field - ann_info.type = ApproximateNearestNeighborInformation::Type::Where; - - /// WHERE section must have at least 5 expressions - /// Operator->Distance(float)->DistanceFunc->Column->Tuple(Array)Func(ReferenceVector(floats)) - if (rpn.size() < 5) - return false; - - auto iter = rpn.begin(); - - /// Query starts from operator less - if (iter->function != RPNElement::FUNCTION_COMPARISON) - return false; - - const bool greater_case = iter->func_name == "greater" || iter->func_name == "greaterOrEquals"; - const bool less_case = iter->func_name == "less" || iter->func_name == "lessOrEquals"; - - ++iter; - - if (less_case) - { - if (iter->function != RPNElement::FUNCTION_FLOAT_LITERAL) - return false; - - ann_info.distance = getFloatOrIntLiteralOrPanic(iter); - if (ann_info.distance < 0) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Distance can't be negative. Got {}", ann_info.distance); - - ++iter; - - } - else if (!greater_case) - return false; - - auto end = rpn.end(); - if (!matchMainParts(iter, end, ann_info)) - return false; - - if (greater_case) - { - if (ann_info.reference_vector.size() < 2) - return false; - ann_info.distance = ann_info.reference_vector.back(); - if (ann_info.distance < 0) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Distance can't be negative. 
Got {}", ann_info.distance); - ann_info.reference_vector.pop_back(); - } - - /// query is ok - return true; -} - -/// Returns true and stores ANNExpr if the query has valid ORDERBY clause -bool ApproximateNearestNeighborCondition::matchRPNOrderBy(RPN & rpn, ApproximateNearestNeighborInformation & ann_info) -{ - /// Fill query type field - ann_info.type = ApproximateNearestNeighborInformation::Type::OrderBy; - - // ORDER BY clause must have at least 3 expressions - if (rpn.size() < 3) - return false; - - auto iter = rpn.begin(); - auto end = rpn.end(); - - return ApproximateNearestNeighborCondition::matchMainParts(iter, end, ann_info); -} - -/// Returns true and stores Length if we have valid LIMIT clause in query -bool ApproximateNearestNeighborCondition::matchRPNLimit(RPNElement & rpn, UInt64 & limit) -{ - if (rpn.function == RPNElement::FUNCTION_INT_LITERAL) - { - limit = rpn.int_literal.value(); - return true; - } - - return false; -} - -/// Matches dist function, referencer vector, column name -bool ApproximateNearestNeighborCondition::matchMainParts(RPN::iterator & iter, const RPN::iterator & end, ApproximateNearestNeighborInformation & ann_info) -{ - bool identifier_found = false; - - /// Matches DistanceFunc->[Column]->[Tuple(array)Func]->ReferenceVector(floats)->[Column] - if (iter->function != RPNElement::FUNCTION_DISTANCE) - return false; - - ann_info.metric = stringToMetric(iter->func_name); - ++iter; - - if (ann_info.metric == ApproximateNearestNeighborInformation::Metric::Lp) - { - if (iter->function != RPNElement::FUNCTION_FLOAT_LITERAL && - iter->function != RPNElement::FUNCTION_INT_LITERAL) - return false; - ann_info.p_for_lp_dist = getFloatOrIntLiteralOrPanic(iter); - ++iter; - } - - if (iter->function == RPNElement::FUNCTION_IDENTIFIER) - { - identifier_found = true; - ann_info.column_name = std::move(iter->identifier.value()); - ++iter; - } - - if (iter->function == RPNElement::FUNCTION_TUPLE || iter->function == RPNElement::FUNCTION_ARRAY) - ++iter; - - if (iter->function == RPNElement::FUNCTION_LITERAL_TUPLE) - { - extractReferenceVectorFromLiteral(ann_info.reference_vector, iter->tuple_literal); - ++iter; - } - - if (iter->function == RPNElement::FUNCTION_LITERAL_ARRAY) - { - extractReferenceVectorFromLiteral(ann_info.reference_vector, iter->array_literal); - ++iter; - } - - /// further conditions are possible if there is no tuple or array, or no identifier is found - /// the tuple or array can be inside a cast function. 
For other cases, see the loop after this condition - if (iter != end && iter->function == RPNElement::FUNCTION_CAST) - { - ++iter; - /// Cast should be made to array or tuple - if (!iter->func_name.starts_with("Array") && !iter->func_name.starts_with("Tuple")) - return false; - ++iter; - if (iter->function == RPNElement::FUNCTION_LITERAL_TUPLE) - { - extractReferenceVectorFromLiteral(ann_info.reference_vector, iter->tuple_literal); - ++iter; - } - else if (iter->function == RPNElement::FUNCTION_LITERAL_ARRAY) - { - extractReferenceVectorFromLiteral(ann_info.reference_vector, iter->array_literal); - ++iter; - } - else - return false; - } - - while (iter != end) - { - if (iter->function == RPNElement::FUNCTION_FLOAT_LITERAL || - iter->function == RPNElement::FUNCTION_INT_LITERAL) - ann_info.reference_vector.emplace_back(getFloatOrIntLiteralOrPanic(iter)); - else if (iter->function == RPNElement::FUNCTION_IDENTIFIER) - { - if (identifier_found) - return false; - ann_info.column_name = std::move(iter->identifier.value()); - identifier_found = true; - } - else - return false; - - ++iter; - } - - /// Final checks of correctness - return identifier_found && !ann_info.reference_vector.empty(); -} - -/// Gets float or int from AST node -float ApproximateNearestNeighborCondition::getFloatOrIntLiteralOrPanic(const RPN::iterator& iter) -{ - if (iter->float_literal.has_value()) - return iter->float_literal.value(); - if (iter->int_literal.has_value()) - return static_cast(iter->int_literal.value()); - throw Exception(ErrorCodes::INCORRECT_QUERY, "Wrong parsed AST in buildRPN\n"); -} - -} diff --git a/src/Storages/MergeTree/IMergeTreeDataPart.cpp b/src/Storages/MergeTree/IMergeTreeDataPart.cpp index 3a44359b537..195aa4fdc10 100644 --- a/src/Storages/MergeTree/IMergeTreeDataPart.cpp +++ b/src/Storages/MergeTree/IMergeTreeDataPart.cpp @@ -428,7 +428,7 @@ std::pair IMergeTreeDataPart::getMinMaxDate() const if (storage.minmax_idx_date_column_pos != -1 && minmax_idx->initialized) { const auto & hyperrectangle = minmax_idx->hyperrectangle[storage.minmax_idx_date_column_pos]; - return {DayNum(hyperrectangle.left.get()), DayNum(hyperrectangle.right.get())}; + return {DayNum(hyperrectangle.left.safeGet()), DayNum(hyperrectangle.right.safeGet())}; } else return {}; @@ -444,15 +444,15 @@ std::pair IMergeTreeDataPart::getMinMaxTime() const if (hyperrectangle.left.getType() == Field::Types::UInt64) { assert(hyperrectangle.right.getType() == Field::Types::UInt64); - return {hyperrectangle.left.get(), hyperrectangle.right.get()}; + return {hyperrectangle.left.safeGet(), hyperrectangle.right.safeGet()}; } /// The case of DateTime64 else if (hyperrectangle.left.getType() == Field::Types::Decimal64) { assert(hyperrectangle.right.getType() == Field::Types::Decimal64); - auto left = hyperrectangle.left.get>(); - auto right = hyperrectangle.right.get>(); + auto left = hyperrectangle.left.safeGet>(); + auto right = hyperrectangle.right.safeGet>(); assert(left.getScale() == right.getScale()); @@ -749,8 +749,16 @@ void IMergeTreeDataPart::loadColumnsChecksumsIndexes(bool require_columns_checks /// Probably there is something wrong with files of this part. /// So it can be helpful to add to the error message some information about those files. String files_in_part; + for (auto it = getDataPartStorage().iterate(); it->isValid(); it->next()) - files_in_part += fmt::format("{}{} ({} bytes)", (files_in_part.empty() ? 
"" : ", "), it->name(), getDataPartStorage().getFileSize(it->name())); + { + std::string file_info; + if (!getDataPartStorage().isDirectory(it->name())) + file_info = fmt::format(" ({} bytes)", getDataPartStorage().getFileSize(it->name())); + + files_in_part += fmt::format("{}{}{}", (files_in_part.empty() ? "" : ", "), it->name(), file_info); + + } if (!files_in_part.empty()) e->addMessage("Part contains files: {}", files_in_part); if (isEmpty()) @@ -1654,11 +1662,9 @@ void IMergeTreeDataPart::loadColumns(bool require) } -/// Project part / part with project parts / compact part doesn't support LWD. bool IMergeTreeDataPart::supportLightweightDeleteMutate() const { - return (part_type == MergeTreeDataPartType::Wide || part_type == MergeTreeDataPartType::Compact) && - parent_part == nullptr && projection_parts.empty(); + return (part_type == MergeTreeDataPartType::Wide || part_type == MergeTreeDataPartType::Compact); } bool IMergeTreeDataPart::hasLightweightDelete() const @@ -2141,7 +2147,27 @@ void IMergeTreeDataPart::checkConsistencyBase() const } } - checksums.checkSizes(getDataPartStorage()); + const auto & data_part_storage = getDataPartStorage(); + for (const auto & [filename, checksum] : checksums.files) + { + try + { + checksum.checkSize(data_part_storage, filename); + } + catch (const Exception & ex) + { + /// For projection parts check will mark them broken in loadProjections + if (!parent_part && filename.ends_with(".proj")) + { + std::string projection_name = fs::path(filename).stem(); + LOG_INFO(storage.log, "Projection {} doesn't exist on start for part {}, marking it as broken", projection_name, name); + if (hasProjection(projection_name)) + markProjectionPartAsBroken(projection_name, ex.message(), ex.code()); + } + else + throw; + } + } } else { diff --git a/src/Storages/MergeTree/KeyCondition.cpp b/src/Storages/MergeTree/KeyCondition.cpp index dfb43c4e75d..2e57e172a7f 100644 --- a/src/Storages/MergeTree/KeyCondition.cpp +++ b/src/Storages/MergeTree/KeyCondition.cpp @@ -349,7 +349,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - String prefix = extractFixedPrefixFromLikePattern(value.get(), /*requires_perfect_prefix*/ false); + String prefix = extractFixedPrefixFromLikePattern(value.safeGet(), /*requires_perfect_prefix*/ false); if (prefix.empty()) return false; @@ -370,7 +370,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - String prefix = extractFixedPrefixFromLikePattern(value.get(), /*requires_perfect_prefix*/ true); + String prefix = extractFixedPrefixFromLikePattern(value.safeGet(), /*requires_perfect_prefix*/ true); if (prefix.empty()) return false; @@ -391,7 +391,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - String prefix = value.get(); + String prefix = value.safeGet(); if (prefix.empty()) return false; @@ -412,7 +412,7 @@ const KeyCondition::AtomMap KeyCondition::atom_map if (value.getType() != Field::Types::String) return false; - const String & expression = value.get(); + const String & expression = value.safeGet(); /// This optimization can't process alternation - this would require /// a comprehensive parsing of regular expression. @@ -1956,11 +1956,8 @@ bool KeyCondition::extractAtomFromTree(const RPNBuilderTreeNode & node, RPNEleme auto common_type_maybe_nullable = (key_expr_type_is_nullable && !common_type->isNullable()) ? 
DataTypePtr(std::make_shared(common_type)) : common_type; - ColumnsWithTypeAndName arguments{ - {nullptr, key_expr_type, ""}, - {DataTypeString().createColumnConst(1, common_type_maybe_nullable->getName()), common_type_maybe_nullable, ""}}; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); - auto func_cast = func_builder_cast->build(arguments); + + auto func_cast = createInternalCast({key_expr_type, {}}, common_type_maybe_nullable, CastType::nonAccurate, {}); /// If we know the given range only contains one value, then we treat all functions as positive monotonic. if (!single_point && !func_cast->hasInformationAboutMonotonicity()) @@ -2931,8 +2928,8 @@ BoolMask KeyCondition::checkInHyperrectangle( /// Let's support only the case of 2d, because I'm not confident in other cases. if (num_dimensions == 2) { - UInt64 left = key_range.left.get(); - UInt64 right = key_range.right.get(); + UInt64 left = key_range.left.safeGet(); + UInt64 right = key_range.right.safeGet(); BoolMask mask(false, true); auto hyperrectangle_intersection_callback = [&](std::array, 2> curve_hyperrectangle) diff --git a/src/Storages/MergeTree/MergeProgress.h b/src/Storages/MergeTree/MergeProgress.h index dd4922051b5..8562e81e761 100644 --- a/src/Storages/MergeTree/MergeProgress.h +++ b/src/Storages/MergeTree/MergeProgress.h @@ -8,10 +8,10 @@ namespace ProfileEvents { - extern const Event MergesTimeMilliseconds; extern const Event MergedUncompressedBytes; extern const Event MergedRows; - extern const Event Merge; + extern const Event MutatedRows; + extern const Event MutatedUncompressedBytes; } namespace DB @@ -63,18 +63,17 @@ public: void updateWatch() { UInt64 watch_curr_elapsed = merge_list_element_ptr->watch.elapsed(); - ProfileEvents::increment(ProfileEvents::MergesTimeMilliseconds, (watch_curr_elapsed - watch_prev_elapsed) / 1000000); watch_prev_elapsed = watch_curr_elapsed; } - void operator() (const Progress & value) + void operator()(const Progress & value) { - ProfileEvents::increment(ProfileEvents::MergedUncompressedBytes, value.read_bytes); - if (stage.is_first) - { - ProfileEvents::increment(ProfileEvents::MergedRows, value.read_rows); - ProfileEvents::increment(ProfileEvents::Merge); - } + if (merge_list_element_ptr->is_mutation) + updateProfileEvents(value, ProfileEvents::MutatedRows, ProfileEvents::MutatedUncompressedBytes); + else + updateProfileEvents(value, ProfileEvents::MergedRows, ProfileEvents::MergedUncompressedBytes); + + updateWatch(); merge_list_element_ptr->bytes_read_uncompressed += value.read_bytes; @@ -90,6 +89,14 @@ public: std::memory_order_relaxed); } } + +private: + void updateProfileEvents(const Progress & value, ProfileEvents::Event rows_event, ProfileEvents::Event bytes_event) const + { + ProfileEvents::increment(bytes_event, value.read_bytes); + if (stage.is_first) + ProfileEvents::increment(rows_event, value.read_rows); + } }; } diff --git a/src/Storages/MergeTree/MergeTask.cpp b/src/Storages/MergeTree/MergeTask.cpp index ce06adf110c..95e00773bae 100644 --- a/src/Storages/MergeTree/MergeTask.cpp +++ b/src/Storages/MergeTree/MergeTask.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -39,6 +40,18 @@ #include #include +namespace ProfileEvents +{ + extern const Event Merge; + extern const Event MergedColumns; + extern const Event GatheredColumns; + extern const Event MergeTotalMilliseconds; + extern const Event MergeExecuteMilliseconds; + extern const Event 
MergeHorizontalStageExecuteMilliseconds; + extern const Event MergeVerticalStageExecuteMilliseconds; + extern const Event MergeProjectionStageExecuteMilliseconds; +} + namespace DB { @@ -169,6 +182,8 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::extractMergingAndGatheringColu bool MergeTask::ExecuteAndFinalizeHorizontalPart::prepare() { + ProfileEvents::increment(ProfileEvents::Merge); + String local_tmp_prefix; if (global_ctx->need_prefix) { @@ -446,6 +461,13 @@ void MergeTask::addGatheringColumn(GlobalRuntimeContextPtr global_ctx, const Str MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::getContextForNextStage() { + /// Do not increment for projection stage because time is already accounted in main task. + if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeHorizontalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } + auto new_ctx = std::make_shared(); new_ctx->rows_sources_write_buf = std::move(ctx->rows_sources_write_buf); @@ -463,8 +485,14 @@ MergeTask::StageRuntimeContextPtr MergeTask::ExecuteAndFinalizeHorizontalPart::g MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNextStage() { - auto new_ctx = std::make_shared(); + /// Do not increment for projection stage because time is already accounted in main task. + if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeVerticalStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } + auto new_ctx = std::make_shared(); new_ctx->need_sync = std::move(ctx->need_sync); ctx.reset(); @@ -474,9 +502,14 @@ MergeTask::StageRuntimeContextPtr MergeTask::VerticalMergeStage::getContextForNe bool MergeTask::ExecuteAndFinalizeHorizontalPart::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -534,7 +567,7 @@ bool MergeTask::ExecuteAndFinalizeHorizontalPart::executeImpl() bool MergeTask::VerticalMergeStage::prepareVerticalMergeForAllColumns() const { - /// No need to execute this part if it is horizontal merge. + /// No need to execute this part if it is horizontal merge. if (global_ctx->chosen_merge_algorithm != MergeAlgorithm::Vertical) return false; @@ -784,6 +817,9 @@ bool MergeTask::MergeProjectionsStage::mergeMinMaxIndexAndPrepareProjections() c /// Print overall profiling info. 
NOTE: it may duplicates previous messages { + ProfileEvents::increment(ProfileEvents::MergedColumns, global_ctx->merging_columns.size()); + ProfileEvents::increment(ProfileEvents::GatheredColumns, global_ctx->gathering_columns.size()); + double elapsed_seconds = global_ctx->merge_list_element_ptr->watch.elapsedSeconds(); LOG_DEBUG(ctx->log, "Merge sorted {} rows, containing {} columns ({} merged, {} gathered) in {} sec., {} rows/sec., {}/sec.", @@ -906,12 +942,29 @@ bool MergeTask::MergeProjectionsStage::finalizeProjectionsAndWholeMerge() const return false; } +MergeTask::StageRuntimeContextPtr MergeTask::MergeProjectionsStage::getContextForNextStage() +{ + /// Do not increment for projection stage because time is already accounted in main task. + /// The projection stage has its own empty projection stage which may add a drift of several milliseconds. + if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(ProfileEvents::MergeExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + ProfileEvents::increment(ProfileEvents::MergeProjectionStageExecuteMilliseconds, ctx->elapsed_execute_ns / 1000000UL); + } + + return nullptr; +} bool MergeTask::VerticalMergeStage::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -920,9 +973,14 @@ bool MergeTask::VerticalMergeStage::execute() bool MergeTask::MergeProjectionsStage::execute() { - assert(subtasks_iterator != subtasks.end()); - if ((this->**subtasks_iterator)()) - return true; + chassert(subtasks_iterator != subtasks.end()); + + Stopwatch watch; + bool res = (this->**subtasks_iterator)(); + ctx->elapsed_execute_ns += watch.elapsedNanoseconds(); + + if (res) + return res; /// Move to the next subtask in an array of subtasks ++subtasks_iterator; @@ -969,12 +1027,26 @@ bool MergeTask::VerticalMergeStage::executeVerticalMergeForAllColumns() const bool MergeTask::execute() { - assert(stages_iterator != stages.end()); - if ((*stages_iterator)->execute()) + chassert(stages_iterator != stages.end()); + const auto & current_stage = *stages_iterator; + + if (current_stage->execute()) return true; - /// Stage is finished, need initialize context for the next stage - auto next_stage_context = (*stages_iterator)->getContextForNextStage(); + /// Stage is finished, need to initialize context for the next stage and update profile events. + + UInt64 current_elapsed_ms = global_ctx->merge_list_element_ptr->watch.elapsedMilliseconds(); + UInt64 stage_elapsed_ms = current_elapsed_ms - global_ctx->prev_elapsed_ms; + global_ctx->prev_elapsed_ms = current_elapsed_ms; + + auto next_stage_context = current_stage->getContextForNextStage(); + + /// Do not increment for projection stage because time is already accounted in main task. 
+ if (global_ctx->parent_part == nullptr) + { + ProfileEvents::increment(current_stage->getTotalTimeProfileEvent(), stage_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MergeTotalMilliseconds, stage_elapsed_ms); + } /// Move to the next stage in an array of stages ++stages_iterator; @@ -1099,7 +1171,6 @@ void MergeTask::ExecuteAndFinalizeHorizontalPart::createMergedStream() /* limit_= */0, /* always_read_till_end_= */false, ctx->rows_sources_write_buf.get(), - true, ctx->blocks_are_granules_size); break; diff --git a/src/Storages/MergeTree/MergeTask.h b/src/Storages/MergeTree/MergeTask.h index 8b0f2130e8e..38ccc287187 100644 --- a/src/Storages/MergeTree/MergeTask.h +++ b/src/Storages/MergeTree/MergeTask.h @@ -3,6 +3,7 @@ #include #include +#include #include #include @@ -26,6 +27,12 @@ #include #include +namespace ProfileEvents +{ + extern const Event MergeHorizontalStageTotalMilliseconds; + extern const Event MergeVerticalStageTotalMilliseconds; + extern const Event MergeProjectionStageTotalMilliseconds; +} namespace DB { @@ -134,6 +141,7 @@ private: { virtual void setRuntimeContext(StageRuntimeContextPtr local, StageRuntimeContextPtr global) = 0; virtual StageRuntimeContextPtr getContextForNextStage() = 0; + virtual ProfileEvents::Event getTotalTimeProfileEvent() const = 0; virtual bool execute() = 0; virtual ~IStage() = default; }; @@ -195,6 +203,7 @@ private: bool need_prefix; scope_guard temporary_directory_lock; + UInt64 prev_elapsed_ms{0}; }; using GlobalRuntimeContextPtr = std::shared_ptr; @@ -233,6 +242,7 @@ private: /// Dependencies for next stages std::list::const_iterator it_name_and_type; bool need_sync{false}; + UInt64 elapsed_execute_ns{0}; }; using ExecuteAndFinalizeHorizontalPartRuntimeContextPtr = std::shared_ptr; @@ -256,7 +266,6 @@ private: ExecuteAndFinalizeHorizontalPartSubtasks::const_iterator subtasks_iterator = subtasks.begin(); - MergeAlgorithm chooseMergeAlgorithm() const; void createMergedStream(); void extractMergingAndGatheringColumns() const; @@ -268,6 +277,7 @@ private: } StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeHorizontalStageTotalMilliseconds; } ExecuteAndFinalizeHorizontalPartRuntimeContextPtr ctx; GlobalRuntimeContextPtr global_ctx; @@ -307,6 +317,7 @@ private: QueryPipeline column_parts_pipeline; std::unique_ptr executor; std::unique_ptr rows_sources_read_buf{nullptr}; + UInt64 elapsed_execute_ns{0}; }; using VerticalMergeRuntimeContextPtr = std::shared_ptr; @@ -321,6 +332,7 @@ private: global_ctx = static_pointer_cast(global); } StageRuntimeContextPtr getContextForNextStage() override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeVerticalStageTotalMilliseconds; } bool prepareVerticalMergeForAllColumns() const; bool executeVerticalMergeForAllColumns() const; @@ -361,6 +373,7 @@ private: MergeTasks::iterator projections_iterator; LoggerPtr log{getLogger("MergeTask::MergeProjectionsStage")}; + UInt64 elapsed_execute_ns{0}; }; using MergeProjectionsRuntimeContextPtr = std::shared_ptr; @@ -368,12 +381,15 @@ private: struct MergeProjectionsStage : public IStage { bool execute() override; + void setRuntimeContext(StageRuntimeContextPtr local, StageRuntimeContextPtr global) override { ctx = static_pointer_cast(local); global_ctx = static_pointer_cast(global); } - StageRuntimeContextPtr getContextForNextStage() override { return nullptr; } + + StageRuntimeContextPtr getContextForNextStage() 
override; + ProfileEvents::Event getTotalTimeProfileEvent() const override { return ProfileEvents::MergeProjectionStageTotalMilliseconds; } bool mergeMinMaxIndexAndPrepareProjections() const; bool executeProjections() const; diff --git a/src/Storages/MergeTree/MergeTreeData.cpp b/src/Storages/MergeTree/MergeTreeData.cpp index a291c39f29d..94f6d196b99 100644 --- a/src/Storages/MergeTree/MergeTreeData.cpp +++ b/src/Storages/MergeTree/MergeTreeData.cpp @@ -1146,7 +1146,7 @@ std::optional MergeTreeData::totalRowsByPartitionPredicateImpl( auto metadata_snapshot = getInMemoryMetadataPtr(); auto virtual_columns_block = getBlockWithVirtualsForFilter(metadata_snapshot, {parts[0]}); - auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr); + auto filter_dag = VirtualColumnUtils::splitFilterDagForAllowedInputs(filter_actions_dag.getOutputs().at(0), nullptr, /*allow_partial_result=*/ false); if (!filter_dag) return {}; @@ -2351,7 +2351,7 @@ size_t MergeTreeData::clearOldTemporaryDirectories(const String & root_path, siz /// We don't control the amount of refs for temporary parts so we cannot decide can we remove blobs /// or not. So we are not doing it bool keep_shared = false; - if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication) + if (disk->supportZeroCopyReplication() && settings->allow_remote_fs_zero_copy_replication && supportsReplication()) { LOG_WARNING(log, "Since zero-copy replication is enabled we are not going to remove blobs from shared storage for {}", full_path); keep_shared = true; @@ -3517,7 +3517,7 @@ void MergeTreeData::checkAlterIsPossible(const AlterCommands & commands, Context const auto & new_column = new_metadata.getColumns().get(command.column_name); if (!old_column.type->equals(*new_column.type)) throw Exception(ErrorCodes::ALTER_OF_COLUMN_IS_FORBIDDEN, - "ALTER types of column {} with statistics is not not safe " + "ALTER types of column {} with statistics is not safe " "because it can change the representation of statistics", backQuoteIfNeed(command.column_name)); } @@ -5888,7 +5888,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, ContextPtr loc if (partition_lit && partition_lit->value.getType() == Field::Types::String) { MergeTreePartInfo::validatePartitionID(partition_ast.value->clone(), format_version); - return partition_lit->value.get(); + return partition_lit->value.safeGet(); } } @@ -5951,7 +5951,7 @@ String MergeTreeData::getPartitionIDFromQuery(const ASTPtr & ast, ContextPtr loc throw Exception(ErrorCodes::INVALID_PARTITION_VALUE, "Expected tuple for complex partition key, got {}", partition_key_value.getTypeName()); - const Tuple & tuple = partition_key_value.get(); + const Tuple & tuple = partition_key_value.safeGet(); if (tuple.size() != fields_count) throw Exception(ErrorCodes::LOGICAL_ERROR, "Wrong number of fields in the partition expression: {}, must be: {}", tuple.size(), fields_count); @@ -6890,7 +6890,7 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( auto * place = arena.alignedAlloc(size_of_state, align_of_state); func->create(place); if (const AggregateFunctionCount * agg_count = typeid_cast(func.get())) - AggregateFunctionCount::set(place, value.get()); + AggregateFunctionCount::set(place, value.safeGet()); else { auto value_column = func->getArgumentTypes().front()->createColumnConst(1, value)->convertToFullColumnIfConst(); @@ -6932,7 +6932,8 @@ Block MergeTreeData::getMinMaxCountProjectionBlock( const auto * predicate = 
filter_dag->getOutputs().at(0); // Generate valid expressions for filtering - VirtualColumnUtils::filterBlockWithPredicate(predicate, virtual_columns_block, query_context); + VirtualColumnUtils::filterBlockWithPredicate( + predicate, virtual_columns_block, query_context, /*allow_filtering_with_partial_predicate =*/true); rows = virtual_columns_block.rows(); part_name_column = virtual_columns_block.getByName("_part").column; @@ -7527,7 +7528,7 @@ MergeTreeData::MatcherFn MergeTreeData::getPartitionMatcher(const ASTPtr & parti if (const auto * partition_lit = partition_ast->as().value->as()) { id = partition_lit->value.getType() == Field::Types::UInt64 - ? toString(partition_lit->value.get()) + ? toString(partition_lit->value.safeGet()) : partition_lit->value.safeGet(); prefixed = true; } diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp index b327480fa92..3ef36ce364c 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.cpp @@ -100,12 +100,6 @@ void MergeTreeDataPartChecksums::checkEqual(const MergeTreeDataPartChecksums & r } } -void MergeTreeDataPartChecksums::checkSizes(const IDataPartStorage & storage) const -{ - for (const auto & [name, checksum] : files) - checksum.checkSize(storage, name); -} - UInt64 MergeTreeDataPartChecksums::getTotalSizeOnDisk() const { UInt64 res = 0; diff --git a/src/Storages/MergeTree/MergeTreeDataPartChecksum.h b/src/Storages/MergeTree/MergeTreeDataPartChecksum.h index 05178dc3a60..dc52f1ada2b 100644 --- a/src/Storages/MergeTree/MergeTreeDataPartChecksum.h +++ b/src/Storages/MergeTree/MergeTreeDataPartChecksum.h @@ -65,9 +65,6 @@ struct MergeTreeDataPartChecksums static bool isBadChecksumsErrorCode(int code); - /// Checks that the directory contains all the needed files of the correct size. Does not check the checksum. - void checkSizes(const IDataPartStorage & storage) const; - /// Returns false if the checksum is too old. bool read(ReadBuffer & in); /// Assume that header with version (the first line) is read diff --git a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp index a6ef0063069..59f3a299c99 100644 --- a/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp +++ b/src/Storages/MergeTree/MergeTreeDataSelectExecutor.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -48,7 +49,6 @@ #include #include -#include namespace CurrentMetrics { @@ -1406,11 +1406,10 @@ MarkRanges MergeTreeDataSelectExecutor::filterMarksUsingIndex( if (index_mark != index_range.begin || !granule || last_index_mark != index_range.begin) reader.read(granule); - auto ann_condition = std::dynamic_pointer_cast(condition); - if (ann_condition != nullptr) + if (index_helper->isVectorSimilarityIndex()) { /// An array of indices of useful ranges. 
- auto result = ann_condition->getUsefulRanges(granule); + auto result = condition->getUsefulRanges(granule); for (auto range : result) { diff --git a/src/Storages/MergeTree/MergeTreeDataWriter.cpp b/src/Storages/MergeTree/MergeTreeDataWriter.cpp index cb02f1cf5f2..f29d715e791 100644 --- a/src/Storages/MergeTree/MergeTreeDataWriter.cpp +++ b/src/Storages/MergeTree/MergeTreeDataWriter.cpp @@ -451,8 +451,8 @@ MergeTreeDataWriter::TemporaryPart MergeTreeDataWriter::writeTempPartImpl( String part_name; if (data.format_version < MERGE_TREE_DATA_MIN_FORMAT_VERSION_WITH_CUSTOM_PARTITIONING) { - DayNum min_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].left.get()); - DayNum max_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].right.get()); + DayNum min_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].left.safeGet()); + DayNum max_date(minmax_idx->hyperrectangle[data.minmax_idx_date_column_pos].right.safeGet()); const auto & date_lut = DateLUT::serverTimezoneInstance(); diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.cpp b/src/Storages/MergeTree/MergeTreeIOSettings.cpp index 58c3bd28d6a..24cb25afe47 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.cpp +++ b/src/Storages/MergeTree/MergeTreeIOSettings.cpp @@ -27,7 +27,6 @@ MergeTreeWriterSettings::MergeTreeWriterSettings( , rewrite_primary_key(rewrite_primary_key_) , blocks_are_granules_size(blocks_are_granules_size_) , query_write_settings(query_write_settings_) - , max_threads_for_annoy_index_creation(global_settings.max_threads_for_annoy_index_creation) , low_cardinality_max_dictionary_size(global_settings.low_cardinality_max_dictionary_size) , low_cardinality_use_single_dictionary_for_part(global_settings.low_cardinality_use_single_dictionary_for_part != 0) , use_compact_variant_discriminators_serialization(storage_settings->use_compact_variant_discriminators_serialization) diff --git a/src/Storages/MergeTree/MergeTreeIOSettings.h b/src/Storages/MergeTree/MergeTreeIOSettings.h index c79ca1e66ee..47b174b2e29 100644 --- a/src/Storages/MergeTree/MergeTreeIOSettings.h +++ b/src/Storages/MergeTree/MergeTreeIOSettings.h @@ -77,8 +77,6 @@ struct MergeTreeWriterSettings bool blocks_are_granules_size; WriteSettings query_write_settings; - size_t max_threads_for_annoy_index_creation; - size_t low_cardinality_max_dictionary_size; bool low_cardinality_use_single_dictionary_for_part; bool use_compact_variant_discriminators_serialization; diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp b/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp deleted file mode 100644 index 497e86334f3..00000000000 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.cpp +++ /dev/null @@ -1,416 +0,0 @@ -#ifdef ENABLE_ANNOY - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int ILLEGAL_COLUMN; - extern const int INCORRECT_DATA; - extern const int INCORRECT_NUMBER_OF_COLUMNS; - extern const int INCORRECT_QUERY; - extern const int LOGICAL_ERROR; - extern const int NOT_IMPLEMENTED; -} - -template -AnnoyIndexWithSerialization::AnnoyIndexWithSerialization(size_t dimensions) - : Base::AnnoyIndex(static_cast(dimensions)) -{ -} - -template -void AnnoyIndexWithSerialization::serialize(WriteBuffer & ostr) const -{ - chassert(Base::_built); - writeIntBinary(Base::_s, ostr); - writeIntBinary(Base::_n_items, ostr); - writeIntBinary(Base::_n_nodes, ostr); - writeIntBinary(Base::_nodes_size, ostr); - 
writeIntBinary(Base::_K, ostr); - writeIntBinary(Base::_seed, ostr); - writeVectorBinary(Base::_roots, ostr); - ostr.write(reinterpret_cast(Base::_nodes), Base::_s * Base::_n_nodes); -} - -template -void AnnoyIndexWithSerialization::deserialize(ReadBuffer & istr) -{ - chassert(!Base::_built); - readIntBinary(Base::_s, istr); - readIntBinary(Base::_n_items, istr); - readIntBinary(Base::_n_nodes, istr); - readIntBinary(Base::_nodes_size, istr); - readIntBinary(Base::_K, istr); - readIntBinary(Base::_seed, istr); - readVectorBinary(Base::_roots, istr); - Base::_nodes = realloc(Base::_nodes, Base::_s * Base::_n_nodes); - istr.readStrict(reinterpret_cast(Base::_nodes), Base::_s * Base::_n_nodes); - - Base::_fd = 0; - // set flags - Base::_loaded = false; - Base::_verbose = false; - Base::_on_disk = false; - Base::_built = true; -} - -template -size_t AnnoyIndexWithSerialization::getDimensions() const -{ - return Base::get_f(); -} - - -template -MergeTreeIndexGranuleAnnoy::MergeTreeIndexGranuleAnnoy(const String & index_name_, const Block & index_sample_block_) - : index_name(index_name_) - , index_sample_block(index_sample_block_) - , index(nullptr) -{} - -template -MergeTreeIndexGranuleAnnoy::MergeTreeIndexGranuleAnnoy( - const String & index_name_, - const Block & index_sample_block_, - AnnoyIndexWithSerializationPtr index_) - : index_name(index_name_) - , index_sample_block(index_sample_block_) - , index(std::move(index_)) -{} - -template -void MergeTreeIndexGranuleAnnoy::serializeBinary(WriteBuffer & ostr) const -{ - /// Number of dimensions is required in the index constructor, - /// so it must be written and read separately from the other part - writeIntBinary(static_cast(index->getDimensions()), ostr); // write dimension - index->serialize(ostr); -} - -template -void MergeTreeIndexGranuleAnnoy::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) -{ - UInt64 dimension; - readIntBinary(dimension, istr); - index = std::make_shared>(dimension); - index->deserialize(istr); -} - -template -MergeTreeIndexAggregatorAnnoy::MergeTreeIndexAggregatorAnnoy( - const String & index_name_, - const Block & index_sample_block_, - UInt64 trees_, - size_t max_threads_for_creation_) - : index_name(index_name_) - , index_sample_block(index_sample_block_) - , trees(trees_) - , max_threads_for_creation(max_threads_for_creation_) -{} - -template -MergeTreeIndexGranulePtr MergeTreeIndexAggregatorAnnoy::getGranuleAndReset() -{ - int threads = (max_threads_for_creation == 0) ? -1 : static_cast(max_threads_for_creation); - /// clang-tidy reports a false positive: it considers %p with an outdated pointer in fprintf() (used by logging which we don't do) dereferencing - index->build(static_cast(trees), threads); - auto granule = std::make_shared>(index_name, index_sample_block, index); - index = nullptr; - return granule; -} - -template -void MergeTreeIndexAggregatorAnnoy::update(const Block & block, size_t * pos, size_t limit) -{ - if (*pos >= block.rows()) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "The provided position is not less than the number of block rows. 
Position: {}, Block rows: {}.", - *pos, block.rows()); - - size_t rows_read = std::min(limit, block.rows() - *pos); - - if (rows_read == 0) - return; - - if (rows_read > std::numeric_limits::max()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Index granularity is too big: more than 4B rows per index granule."); - - if (index_sample_block.columns() > 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); - - const String & index_column_name = index_sample_block.getByPosition(0).name; - ColumnPtr column_cut = block.getByName(index_column_name).column->cut(*pos, rows_read); - - if (const auto & column_array = typeid_cast(column_cut.get())) - { - const auto & column_array_data = column_array->getData(); - const auto & column_array_data_float = typeid_cast(column_array_data); - const auto & column_array_data_float_data = column_array_data_float.getData(); - - const auto & column_array_offsets = column_array->getOffsets(); - const size_t num_rows = column_array_offsets.size(); - - if (column_array->empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - - /// The Annoy algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays - /// are INSERTed into an Annoy-indexed column or if no value was specified at all in which case the arrays take on their default - /// value which is also empty. - if (column_array->isDefaultAt(0)) - throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); - - /// Check all sizes are the same - size_t dimension = column_array_offsets[0]; - for (size_t i = 0; i < num_rows - 1; ++i) - if (column_array_offsets[i + 1] - column_array_offsets[i] != dimension) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); - - /// Also check that previously inserted blocks have the same size as this block. - /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across - /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. 
- if (index && index->getDimensions() != dimension) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); - - if (!index) - index = std::make_shared>(dimension); - - /// Add all rows of block - index->add_item(index->get_n_items(), column_array_data_float_data.data()); - for (size_t current_row = 1; current_row < num_rows; ++current_row) - index->add_item(index->get_n_items(), &column_array_data_float_data[column_array_offsets[current_row - 1]]); - } - else if (const auto & column_tuple = typeid_cast(column_cut.get())) - { - const auto & column_tuple_columns = column_tuple->getColumns(); - - /// TODO check if calling index->add_item() directly on the block's tuples is faster than materializing everything - std::vector> data(column_tuple->size(), std::vector()); - for (const auto & column : column_tuple_columns) - { - const auto & pod_array = typeid_cast(column.get())->getData(); - for (size_t i = 0; i < pod_array.size(); ++i) - data[i].push_back(pod_array[i]); - } - - if (data.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Tuple has 0 rows, {} rows expected", rows_read); - - if (!index) - index = std::make_shared>(data[0].size()); - - for (const auto & item : data) - index->add_item(index->get_n_items(), item.data()); - } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array or Tuple column"); - - *pos += rows_read; -} - - -MergeTreeIndexConditionAnnoy::MergeTreeIndexConditionAnnoy( - const IndexDescription & /*index_description*/, - const SelectQueryInfo & query, - const String & distance_function_, - ContextPtr context) - : ann_condition(query, context) - , distance_function(distance_function_) - , search_k(context->getSettingsRef().annoy_index_search_k_nodes) -{} - -bool MergeTreeIndexConditionAnnoy::mayBeTrueOnGranule(MergeTreeIndexGranulePtr /*idx_granule*/) const -{ - throw Exception(ErrorCodes::LOGICAL_ERROR, "mayBeTrueOnGranule is not supported for ANN skip indexes"); -} - -bool MergeTreeIndexConditionAnnoy::alwaysUnknownOrTrue() const -{ - return ann_condition.alwaysUnknownOrTrue(distance_function); -} - -std::vector MergeTreeIndexConditionAnnoy::getUsefulRanges(MergeTreeIndexGranulePtr idx_granule) const -{ - if (distance_function == DISTANCE_FUNCTION_L2) - return getUsefulRangesImpl(idx_granule); - else if (distance_function == DISTANCE_FUNCTION_COSINE) - return getUsefulRangesImpl(idx_granule); - std::unreachable(); -} - -template -std::vector MergeTreeIndexConditionAnnoy::getUsefulRangesImpl(MergeTreeIndexGranulePtr idx_granule) const -{ - const UInt64 limit = ann_condition.getLimit(); - const UInt64 index_granularity = ann_condition.getIndexGranularity(); - const std::optional comparison_distance = ann_condition.getQueryType() == ApproximateNearestNeighborInformation::Type::Where - ? 
std::optional(ann_condition.getComparisonDistanceForWhereQuery()) - : std::nullopt; - - if (comparison_distance && comparison_distance.value() < 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to optimize query with where without distance"); - - const std::vector reference_vector = ann_condition.getReferenceVector(); - - const auto granule = std::dynamic_pointer_cast>(idx_granule); - if (granule == nullptr) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Granule has the wrong type"); - - const AnnoyIndexWithSerializationPtr annoy = granule->index; - - if (ann_condition.getDimensions() != annoy->getDimensions()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) " - "does not match the dimension in the index ({})", - ann_condition.getDimensions(), annoy->getDimensions()); - - std::vector neighbors; /// indexes of dots which were closest to the reference vector - std::vector distances; - neighbors.reserve(limit); - distances.reserve(limit); - - annoy->get_nns_by_vector(reference_vector.data(), limit, static_cast(search_k), &neighbors, &distances); - - chassert(neighbors.size() == distances.size()); - - std::vector granules; - granules.reserve(neighbors.size()); - for (size_t i = 0; i < neighbors.size(); ++i) - { - if (comparison_distance && distances[i] > comparison_distance) - continue; - granules.push_back(neighbors[i] / index_granularity); - } - - /// make unique - std::sort(granules.begin(), granules.end()); - granules.erase(std::unique(granules.begin(), granules.end()), granules.end()); - - return granules; -} - -MergeTreeIndexAnnoy::MergeTreeIndexAnnoy(const IndexDescription & index_, UInt64 trees_, const String & distance_function_) - : IMergeTreeIndex(index_) - , trees(trees_) - , distance_function(distance_function_) -{} - -MergeTreeIndexGranulePtr MergeTreeIndexAnnoy::createIndexGranule() const -{ - if (distance_function == DISTANCE_FUNCTION_L2) - return std::make_shared>(index.name, index.sample_block); - else if (distance_function == DISTANCE_FUNCTION_COSINE) - return std::make_shared>(index.name, index.sample_block); - std::unreachable(); -} - -MergeTreeIndexAggregatorPtr MergeTreeIndexAnnoy::createIndexAggregator(const MergeTreeWriterSettings & settings) const -{ - /// TODO: Support more metrics. 
Available metrics: https://github.com/spotify/annoy/blob/master/src/annoymodule.cc#L151-L171 - if (distance_function == DISTANCE_FUNCTION_L2) - return std::make_shared>(index.name, index.sample_block, trees, settings.max_threads_for_annoy_index_creation); - else if (distance_function == DISTANCE_FUNCTION_COSINE) - return std::make_shared>(index.name, index.sample_block, trees, settings.max_threads_for_annoy_index_creation); - std::unreachable(); -} - -MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const -{ - return std::make_shared(index, query, distance_function, context); -}; - -MergeTreeIndexConditionPtr MergeTreeIndexAnnoy::createIndexCondition(const ActionsDAG *, ContextPtr) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); -} - -MergeTreeIndexPtr annoyIndexCreator(const IndexDescription & index) -{ - static constexpr auto DEFAULT_DISTANCE_FUNCTION = DISTANCE_FUNCTION_L2; - String distance_function = DEFAULT_DISTANCE_FUNCTION; - if (!index.arguments.empty()) - distance_function = index.arguments[0].get(); - - static constexpr auto DEFAULT_TREES = 100uz; - UInt64 trees = DEFAULT_TREES; - if (index.arguments.size() > 1) - trees = index.arguments[1].get(); - - return std::make_shared(index, trees, distance_function); -} - -void annoyIndexValidator(const IndexDescription & index, bool /* attach */) -{ - /// Check number and type of Annoy index arguments: - - if (index.arguments.size() > 2) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Annoy index must not have more than two parameters"); - - if (!index.arguments.empty() && index.arguments[0].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Distance function argument of Annoy index must be of type String"); - - if (index.arguments.size() > 1 && index.arguments[1].getType() != Field::Types::UInt64) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Number of trees argument of Annoy index must be of type UInt64"); - - /// Check that the index is created on a single column - - if (index.column_names.size() != 1 || index.data_types.size() != 1) - throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Annoy indexes must be created on a single column"); - - /// Check that a supported metric was passed as first argument - - if (!index.arguments.empty()) - { - String distance_name = index.arguments[0].get(); - if (distance_name != DISTANCE_FUNCTION_L2 && distance_name != DISTANCE_FUNCTION_COSINE) - throw Exception(ErrorCodes::INCORRECT_DATA, "Annoy index only supports distance functions '{}' and '{}'", DISTANCE_FUNCTION_L2, DISTANCE_FUNCTION_COSINE); - } - - /// Check data type of indexed column: - - auto throw_unsupported_underlying_column_exception = []() - { - throw Exception( - ErrorCodes::ILLEGAL_COLUMN, - "Annoy indexes can only be created on columns of type Array(Float32) and Tuple(Float32[, Float32[, ...]])"); - }; - - DataTypePtr data_type = index.sample_block.getDataTypes()[0]; - - if (const auto * data_type_array = typeid_cast(data_type.get())) - { - TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw_unsupported_underlying_column_exception(); - } - else if (const auto * data_type_tuple = typeid_cast(data_type.get())) - { - const DataTypes & inner_types = data_type_tuple->getElements(); - for (const auto & inner_type : inner_types) - { - TypeIndex nested_type_index = 
inner_type->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw_unsupported_underlying_column_exception(); - } - } - else - throw_unsupported_underlying_column_exception(); -} - -} - -#endif diff --git a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h b/src/Storages/MergeTree/MergeTreeIndexAnnoy.h deleted file mode 100644 index 282920c608e..00000000000 --- a/src/Storages/MergeTree/MergeTreeIndexAnnoy.h +++ /dev/null @@ -1,112 +0,0 @@ -#pragma once - -#ifdef ENABLE_ANNOY - -#include - -#include -#include - -namespace DB -{ - -template -class AnnoyIndexWithSerialization : public Annoy::AnnoyIndex -{ - using Base = Annoy::AnnoyIndex; - -public: - explicit AnnoyIndexWithSerialization(size_t dimensions); - void serialize(WriteBuffer & ostr) const; - void deserialize(ReadBuffer & istr); - size_t getDimensions() const; -}; - -template -using AnnoyIndexWithSerializationPtr = std::shared_ptr>; - - -template -struct MergeTreeIndexGranuleAnnoy final : public IMergeTreeIndexGranule -{ - MergeTreeIndexGranuleAnnoy(const String & index_name_, const Block & index_sample_block_); - MergeTreeIndexGranuleAnnoy(const String & index_name_, const Block & index_sample_block_, AnnoyIndexWithSerializationPtr index_); - - ~MergeTreeIndexGranuleAnnoy() override = default; - - void serializeBinary(WriteBuffer & ostr) const override; - void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; - - bool empty() const override { return !index.get(); } - - const String index_name; - const Block index_sample_block; - AnnoyIndexWithSerializationPtr index; -}; - - -template -struct MergeTreeIndexAggregatorAnnoy final : IMergeTreeIndexAggregator -{ - MergeTreeIndexAggregatorAnnoy(const String & index_name_, const Block & index_sample_block, UInt64 trees, size_t max_threads_for_creation); - ~MergeTreeIndexAggregatorAnnoy() override = default; - - bool empty() const override { return !index || index->get_n_items() == 0; } - MergeTreeIndexGranulePtr getGranuleAndReset() override; - void update(const Block & block, size_t * pos, size_t limit) override; - - const String index_name; - const Block index_sample_block; - const UInt64 trees; - const size_t max_threads_for_creation; - AnnoyIndexWithSerializationPtr index; -}; - - -class MergeTreeIndexConditionAnnoy final : public IMergeTreeIndexConditionApproximateNearestNeighbor -{ -public: - MergeTreeIndexConditionAnnoy( - const IndexDescription & index_description, - const SelectQueryInfo & query, - const String & distance_function, - ContextPtr context); - - ~MergeTreeIndexConditionAnnoy() override = default; - - bool alwaysUnknownOrTrue() const override; - bool mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx_granule) const override; - std::vector getUsefulRanges(MergeTreeIndexGranulePtr idx_granule) const override; - -private: - template - std::vector getUsefulRangesImpl(MergeTreeIndexGranulePtr idx_granule) const; - - const ApproximateNearestNeighborCondition ann_condition; - const String distance_function; - const Int64 search_k; -}; - - -class MergeTreeIndexAnnoy final : public IMergeTreeIndex -{ -public: - - MergeTreeIndexAnnoy(const IndexDescription & index_, UInt64 trees_, const String & distance_function_); - - ~MergeTreeIndexAnnoy() override = default; - - MergeTreeIndexGranulePtr createIndexGranule() const override; - MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; - MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) 
const; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; - bool isVectorSearch() const override { return true; } - -private: - const UInt64 trees; - const String distance_function; -}; - -} - -#endif diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp index dc314ce53d4..b796ed7114e 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilter.cpp @@ -348,19 +348,19 @@ bool MergeTreeIndexConditionBloomFilter::extractAtomFromTree(const RPNBuilderTre { if (const_value.getType() == Field::Types::UInt64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Int64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Float64) { - out.function = const_value.get() != 0.0 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() != 0.0 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } } @@ -703,7 +703,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseTreeEquals( const bool is_nullable = actual_type->isNullable(); auto mutable_column = actual_type->createColumn(); - for (const auto & f : value_field.get()) + for (const auto & f : value_field.safeGet()) { if ((f.isNull() && !is_nullable) || f.isDecimal(f.getType())) /// NOLINT(readability-static-accessed-through-instance) return false; @@ -774,7 +774,7 @@ bool MergeTreeIndexConditionBloomFilter::traverseTreeEquals( if (which.isTuple() && key_node_function_name == "tuple") { - const Tuple & tuple = value_field.get(); + const Tuple & tuple = value_field.safeGet(); const auto * value_tuple_data_type = typeid_cast(value_type.get()); if (tuple.size() != key_node_function_arguments_size) @@ -963,7 +963,7 @@ void bloomFilterIndexValidator(const IndexDescription & index, bool attach) { const auto & argument = index.arguments[0]; - if (!attach && (argument.getType() != Field::Types::Float64 || argument.get() < 0 || argument.get() > 1)) + if (!attach && (argument.getType() != Field::Types::Float64 || argument.safeGet() < 0 || argument.safeGet() > 1)) throw Exception(ErrorCodes::BAD_ARGUMENTS, "The BloomFilter false positive must be a double number between 0 and 1."); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp index 5b6813d12e3..857b7903588 100644 --- a/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexBloomFilterText.cpp @@ -341,19 +341,19 @@ bool MergeTreeConditionBloomFilterText::extractAtomFromTree(const RPNBuilderTree if (const_value.getType() == Field::Types::UInt64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Int64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? 
RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Float64) { - out.function = const_value.get() != 0.0 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() != 0.0 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } } @@ -493,7 +493,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - auto value = const_value.get(); + auto value = const_value.safeGet(); if (is_case_insensitive_scenario) std::ranges::transform(value, value.begin(), [](const auto & c) { return static_cast(std::tolower(c)); }); @@ -509,7 +509,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_HAS; out.bloom_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -519,7 +519,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_HAS; out.bloom_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -529,7 +529,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -538,7 +538,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -547,7 +547,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -556,7 +556,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToBloomFilter(value.data(), value.size(), *out.bloom_filter); return true; } @@ -565,7 +565,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, true, false); return true; } @@ -574,7 +574,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.key_column = *key_index; out.function = 
RPNElement::FUNCTION_EQUALS; out.bloom_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToBloomFilter(value.data(), value.size(), *out.bloom_filter, false, true); return true; } @@ -589,13 +589,13 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( /// 2d vector is not needed here but is used because already exists for FUNCTION_IN std::vector> bloom_filters; bloom_filters.emplace_back(); - for (const auto & element : const_value.get()) + for (const auto & element : const_value.safeGet()) { if (element.getType() != Field::Types::String) return false; bloom_filters.back().emplace_back(params); - const auto & value = element.get(); + const auto & value = element.safeGet(); if (function_name == "multiSearchAny") { @@ -615,7 +615,7 @@ bool MergeTreeConditionBloomFilterText::traverseTreeEquals( out.function = RPNElement::FUNCTION_MATCH; out.bloom_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); String required_substring; bool dummy_is_trivial, dummy_required_substring_is_prefix; std::vector alternatives; @@ -743,11 +743,11 @@ MergeTreeIndexPtr bloomFilterIndexTextCreator( { if (index.type == NgramTokenExtractor::getName()) { - size_t n = index.arguments[0].get(); + size_t n = index.arguments[0].safeGet(); BloomFilterParameters params( - index.arguments[1].get(), - index.arguments[2].get(), - index.arguments[3].get()); + index.arguments[1].safeGet(), + index.arguments[2].safeGet(), + index.arguments[3].safeGet()); auto tokenizer = std::make_unique(n); @@ -756,9 +756,9 @@ MergeTreeIndexPtr bloomFilterIndexTextCreator( else if (index.type == SplitTokenExtractor::getName()) { BloomFilterParameters params( - index.arguments[0].get(), - index.arguments[1].get(), - index.arguments[2].get()); + index.arguments[0].safeGet(), + index.arguments[1].safeGet(), + index.arguments[2].safeGet()); auto tokenizer = std::make_unique(); @@ -815,9 +815,9 @@ void bloomFilterIndexTextValidator(const IndexDescription & index, bool /*attach /// Just validate BloomFilterParameters params( - index.arguments[0].get(), - index.arguments[1].get(), - index.arguments[2].get()); + index.arguments[0].safeGet(), + index.arguments[1].safeGet(), + index.arguments[2].safeGet()); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp index cd6af68ebcc..b5c6bb95d37 100644 --- a/src/Storages/MergeTree/MergeTreeIndexFullText.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexFullText.cpp @@ -74,7 +74,7 @@ void MergeTreeIndexGranuleFullText::deserializeBinary(ReadBuffer & istr, MergeTr for (auto & gin_filter : gin_filters) { size_serialization->deserializeBinary(field_rows, istr, {}); - size_t filter_size = field_rows.get(); + size_t filter_size = field_rows.safeGet(); gin_filter.getFilter().resize(filter_size); if (filter_size == 0) @@ -379,19 +379,19 @@ bool MergeTreeConditionFullText::traverseAtomAST(const RPNBuilderTreeNode & node /// Check constant like in KeyCondition if (const_value.getType() == Field::Types::UInt64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Int64) { - out.function = const_value.get() ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() ? 
RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } if (const_value.getType() == Field::Types::Float64) { - out.function = const_value.get() != 0.00 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; + out.function = const_value.safeGet() != 0.00 ? RPNElement::ALWAYS_TRUE : RPNElement::ALWAYS_FALSE; return true; } } @@ -530,7 +530,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_HAS; out.gin_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -539,7 +539,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_HAS; out.gin_filter = std::make_unique(params); - auto & value = const_value.get(); + auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -549,7 +549,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -558,7 +558,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -567,7 +567,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -576,7 +576,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_NOT_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringLikeToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -585,7 +585,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->stringToGinFilter(value.data(), value.size(), *out.gin_filter); return true; } @@ -594,7 +594,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, true, false); return true; } @@ -603,7 +603,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_EQUALS; out.gin_filter = std::make_unique(params); - const auto & 
value = const_value.get(); + const auto & value = const_value.safeGet(); token_extractor->substringToGinFilter(value.data(), value.size(), *out.gin_filter, false, true); return true; } @@ -615,13 +615,13 @@ bool MergeTreeConditionFullText::traverseASTEquals( /// 2d vector is not needed here but is used because already exists for FUNCTION_IN std::vector gin_filters; gin_filters.emplace_back(); - for (const auto & element : const_value.get()) + for (const auto & element : const_value.safeGet()) { if (element.getType() != Field::Types::String) return false; gin_filters.back().emplace_back(params); - const auto & value = element.get(); + const auto & value = element.safeGet(); token_extractor->substringToGinFilter(value.data(), value.size(), gin_filters.back().back(), false, false); } out.set_gin_filters = std::move(gin_filters); @@ -632,7 +632,7 @@ bool MergeTreeConditionFullText::traverseASTEquals( out.key_column = key_column_num; out.function = RPNElement::FUNCTION_MATCH; - auto & value = const_value.get(); + auto & value = const_value.safeGet(); String required_substring; bool dummy_is_trivial, dummy_required_substring_is_prefix; std::vector alternatives; @@ -776,8 +776,8 @@ MergeTreeIndexConditionPtr MergeTreeIndexFullText::createIndexCondition( MergeTreeIndexPtr fullTextIndexCreator( const IndexDescription & index) { - size_t n = index.arguments.empty() ? 0 : index.arguments[0].get(); - UInt64 max_rows = index.arguments.size() < 2 ? DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].get(); + size_t n = index.arguments.empty() ? 0 : index.arguments[0].safeGet(); + UInt64 max_rows = index.arguments.size() < 2 ? DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].safeGet(); GinFilterParameters params(n, max_rows); /// Use SplitTokenExtractor when n is 0, otherwise use NgramTokenExtractor @@ -826,12 +826,12 @@ void fullTextIndexValidator(const IndexDescription & index, bool /*attach*/) { if (index.arguments[1].getType() != Field::Types::UInt64) throw Exception(ErrorCodes::INCORRECT_QUERY, "The second full text index argument must be UInt64"); - if (index.arguments[1].get() != UNLIMITED_ROWS_PER_POSTINGS_LIST && index.arguments[1].get() < MIN_ROWS_PER_POSTINGS_LIST) + if (index.arguments[1].safeGet() != UNLIMITED_ROWS_PER_POSTINGS_LIST && index.arguments[1].safeGet() < MIN_ROWS_PER_POSTINGS_LIST) throw Exception(ErrorCodes::INCORRECT_QUERY, "The maximum rows per postings list must be no less than {}", MIN_ROWS_PER_POSTINGS_LIST); } /// Just validate - size_t ngrams = index.arguments.empty() ? 0 : index.arguments[0].get(); - UInt64 max_rows_per_postings_list = index.arguments.size() < 2 ? DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].get(); + size_t ngrams = index.arguments.empty() ? 0 : index.arguments[0].safeGet(); + UInt64 max_rows_per_postings_list = index.arguments.size() < 2 ? 
DEFAULT_MAX_ROWS_PER_POSTINGS_LIST : index.arguments[1].safeGet(); GinFilterParameters params(ngrams, max_rows_per_postings_list); } } diff --git a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp index cd8065ecadf..abf3ae56376 100644 --- a/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexHypothesis.cpp @@ -37,7 +37,7 @@ void MergeTreeIndexGranuleHypothesis::deserializeBinary(ReadBuffer & istr, Merge Field field_met; const auto & size_type = DataTypePtr(std::make_shared()); size_type->getDefaultSerialization()->deserializeBinary(field_met, istr, {}); - met = field_met.get(); + met = field_met.safeGet(); is_empty = false; } diff --git a/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp new file mode 100644 index 00000000000..29de109d4fc --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.cpp @@ -0,0 +1,45 @@ +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int ILLEGAL_INDEX; +} + +MergeTreeIndexLegacyVectorSimilarity::MergeTreeIndexLegacyVectorSimilarity(const IndexDescription & index_) + : IMergeTreeIndex(index_) +{ +} + +MergeTreeIndexGranulePtr MergeTreeIndexLegacyVectorSimilarity::createIndexGranule() const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +} + +MergeTreeIndexAggregatorPtr MergeTreeIndexLegacyVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings &) const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +} + +MergeTreeIndexConditionPtr MergeTreeIndexLegacyVectorSimilarity::createIndexCondition(const SelectQueryInfo &, ContextPtr) const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +}; + +MergeTreeIndexConditionPtr MergeTreeIndexLegacyVectorSimilarity::createIndexCondition(const ActionsDAG *, ContextPtr) const +{ + throw Exception(ErrorCodes::ILLEGAL_INDEX, "Indexes of type 'annoy' or 'usearch' are no longer supported. Please drop and recreate the index as type 'vector_similarity'"); +} + +MergeTreeIndexPtr legacyVectorSimilarityIndexCreator(const IndexDescription & index) +{ + return std::make_shared(index); +} + +void legacyVectorSimilarityIndexValidator(const IndexDescription &, bool) +{ +} + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h new file mode 100644 index 00000000000..1015401823d --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexLegacyVectorSimilarity.h @@ -0,0 +1,26 @@ +#pragma once + +#include + +/// Walking corpse implementation for removed skipping index of type "annoy" and "usearch". +/// Its only purpose is to allow loading old tables with indexes of these types. +/// Data insertion and index usage/search will throw an exception, suggesting to migrate to "vector_similarity" indexes. 
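Editorial note on the pattern introduced by this header: it is a deliberate load-only stub. The old index type names must still parse so that existing table metadata attaches, but every method that would actually build or query the index throws with a migration hint. A minimal standalone sketch of the same idea, using hypothetical simplified names and std::runtime_error in place of DB::Exception:

#include <memory>
#include <stdexcept>
#include <string>

/// Simplified stand-ins for the real granule/aggregator interfaces (illustrative only).
struct IndexGranule {};
struct IndexAggregator {};

struct IIndex
{
    virtual ~IIndex() = default;
    virtual std::shared_ptr<IndexGranule> createIndexGranule() const = 0;
    virtual std::shared_ptr<IndexAggregator> createIndexAggregator() const = 0;
};

/// Load-only stub: constructing it (i.e. attaching a table that declares the
/// legacy index) succeeds, but every use throws with a migration hint.
class LegacyIndexStub final : public IIndex
{
public:
    explicit LegacyIndexStub(std::string legacy_type_) : legacy_type(std::move(legacy_type_)) {}

    std::shared_ptr<IndexGranule> createIndexGranule() const override
    {
        throw std::runtime_error(migrationHint());
    }

    std::shared_ptr<IndexAggregator> createIndexAggregator() const override
    {
        throw std::runtime_error(migrationHint());
    }

private:
    std::string migrationHint() const
    {
        return "Indexes of type '" + legacy_type + "' are no longer supported. "
               "Please drop and recreate the index as type 'vector_similarity'";
    }

    std::string legacy_type;
};

Registering one such stub under both removed names ("annoy" and "usearch") is exactly what the MergeTreeIndexFactory changes further down in this diff do.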
+ +namespace DB +{ + +class MergeTreeIndexLegacyVectorSimilarity : public IMergeTreeIndex +{ +public: + explicit MergeTreeIndexLegacyVectorSimilarity(const IndexDescription & index_); + ~MergeTreeIndexLegacyVectorSimilarity() override = default; + + MergeTreeIndexGranulePtr createIndexGranule() const override; + MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings &) const override; + MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo &, ContextPtr) const; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; + + bool isVectorSimilarityIndex() const override { return true; } +}; + +} diff --git a/src/Storages/MergeTree/MergeTreeIndexSet.cpp b/src/Storages/MergeTree/MergeTreeIndexSet.cpp index a92df4ac72d..fa242fccbc1 100644 --- a/src/Storages/MergeTree/MergeTreeIndexSet.cpp +++ b/src/Storages/MergeTree/MergeTreeIndexSet.cpp @@ -97,7 +97,7 @@ void MergeTreeIndexGranuleSet::deserializeBinary(ReadBuffer & istr, MergeTreeInd Field field_rows; const auto & size_type = DataTypePtr(std::make_shared()); size_type->getDefaultSerialization()->deserializeBinary(field_rows, istr, {}); - size_t rows_to_read = field_rows.get(); + size_t rows_to_read = field_rows.safeGet(); if (rows_to_read == 0) return; @@ -591,7 +591,7 @@ MergeTreeIndexConditionPtr MergeTreeIndexSet::createIndexCondition( MergeTreeIndexPtr setIndexCreator(const IndexDescription & index) { - size_t max_rows = index.arguments[0].get(); + size_t max_rows = index.arguments[0].safeGet(); return std::make_shared(index, max_rows); } diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp b/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp deleted file mode 100644 index 59a4b0fbf9c..00000000000 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.cpp +++ /dev/null @@ -1,463 +0,0 @@ -#ifdef ENABLE_USEARCH - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wpass-failed" - -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ProfileEvents -{ - extern const Event USearchAddCount; - extern const Event USearchAddVisitedMembers; - extern const Event USearchAddComputedDistances; - extern const Event USearchSearchCount; - extern const Event USearchSearchVisitedMembers; - extern const Event USearchSearchComputedDistances; -} - -namespace DB -{ - -namespace ErrorCodes -{ - extern const int CANNOT_ALLOCATE_MEMORY; - extern const int ILLEGAL_COLUMN; - extern const int INCORRECT_DATA; - extern const int INCORRECT_NUMBER_OF_COLUMNS; - extern const int INCORRECT_QUERY; - extern const int LOGICAL_ERROR; - extern const int NOT_IMPLEMENTED; -} - -namespace -{ - -std::unordered_map nameToScalarKind = { - {"f64", unum::usearch::scalar_kind_t::f64_k}, - {"f32", unum::usearch::scalar_kind_t::f32_k}, - {"f16", unum::usearch::scalar_kind_t::f16_k}, - {"i8", unum::usearch::scalar_kind_t::i8_k}}; - -} - -template -USearchIndexWithSerialization::USearchIndexWithSerialization(size_t dimensions, unum::usearch::scalar_kind_t scalar_kind) - : Base(Base::make(unum::usearch::metric_punned_t(dimensions, Metric, scalar_kind))) -{ -} - -template -void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const -{ - auto callback = [&ostr](void * from, size_t n) - { - ostr.write(reinterpret_cast(from), n); - return true; - }; - - Base::save_to_stream(callback); -} - -template -void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) -{ - auto callback = [&istr](void * from, size_t n) - { - 
istr.readStrict(reinterpret_cast(from), n); - return true; - }; - - Base::load_from_stream(callback); -} - -template -size_t USearchIndexWithSerialization::getDimensions() const -{ - return Base::dimensions(); -} - -template -MergeTreeIndexGranuleUSearch::MergeTreeIndexGranuleUSearch( - const String & index_name_, - const Block & index_sample_block_, - unum::usearch::scalar_kind_t scalar_kind_) - : index_name(index_name_) - , index_sample_block(index_sample_block_) - , scalar_kind(scalar_kind_) - , index(nullptr) -{ -} - -template -MergeTreeIndexGranuleUSearch::MergeTreeIndexGranuleUSearch( - const String & index_name_, - const Block & index_sample_block_, - unum::usearch::scalar_kind_t scalar_kind_, - USearchIndexWithSerializationPtr index_) - : index_name(index_name_) - , index_sample_block(index_sample_block_) - , scalar_kind(scalar_kind_) - , index(std::move(index_)) -{ -} - -template -void MergeTreeIndexGranuleUSearch::serializeBinary(WriteBuffer & ostr) const -{ - /// Number of dimensions is required in the index constructor, - /// so it must be written and read separately from the other part - writeIntBinary(static_cast(index->getDimensions()), ostr); // write dimension - index->serialize(ostr); -} - -template -void MergeTreeIndexGranuleUSearch::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) -{ - UInt64 dimension; - readIntBinary(dimension, istr); - index = std::make_shared>(dimension, scalar_kind); - index->deserialize(istr); -} - -template -MergeTreeIndexAggregatorUSearch::MergeTreeIndexAggregatorUSearch( - const String & index_name_, - const Block & index_sample_block_, - unum::usearch::scalar_kind_t scalar_kind_) - : index_name(index_name_) - , index_sample_block(index_sample_block_) - , scalar_kind(scalar_kind_) -{ -} - -template -MergeTreeIndexGranulePtr MergeTreeIndexAggregatorUSearch::getGranuleAndReset() -{ - auto granule = std::make_shared>(index_name, index_sample_block, scalar_kind, index); - index = nullptr; - return granule; -} - -template -void MergeTreeIndexAggregatorUSearch::update(const Block & block, size_t * pos, size_t limit) -{ - if (*pos >= block.rows()) - throw Exception( - ErrorCodes::LOGICAL_ERROR, - "The provided position is not less than the number of block rows. Position: {}, Block rows: {}.", - *pos, - block.rows()); - - size_t rows_read = std::min(limit, block.rows() - *pos); - - if (rows_read == 0) - return; - - if (rows_read > std::numeric_limits::max()) - throw Exception(ErrorCodes::INCORRECT_DATA, "Index granularity is too big: more than 4B rows per index granule."); - - if (index_sample_block.columns() > 1) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); - - const String & index_column_name = index_sample_block.getByPosition(0).name; - ColumnPtr column_cut = block.getByName(index_column_name).column->cut(*pos, rows_read); - - if (const auto & column_array = typeid_cast(column_cut.get())) - { - const auto & column_array_data = column_array->getData(); - const auto & column_array_data_float = typeid_cast(column_array_data); - const auto & column_array_data_float_data = column_array_data_float.getData(); - - const auto & column_array_offsets = column_array->getOffsets(); - const size_t num_rows = column_array_offsets.size(); - - if (column_array->empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); - - /// The Usearch algorithm naturally assumes that the indexed vectors have dimension >= 1. 
This condition is violated if empty arrays - /// are INSERTed into an Usearch-indexed column or if no value was specified at all in which case the arrays take on their default - /// values which is also empty. - if (column_array->isDefaultAt(0)) - throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); - - /// Check all sizes are the same - size_t dimension = column_array_offsets[0]; - for (size_t i = 0; i < num_rows - 1; ++i) - if (column_array_offsets[i + 1] - column_array_offsets[i] != dimension) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); - - /// Also check that previously inserted blocks have the same size as this block. - /// Note that this guarantees consistency of dimension only within parts. We are unable to detect inconsistent dimensions across - /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. - if (index && index->getDimensions() != dimension) - throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); - - if (!index) - index = std::make_shared>(dimension, scalar_kind); - - /// Add all rows of block - if (!index->reserve(unum::usearch::ceil2(index->size() + num_rows))) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index"); - - for (size_t current_row = 0; current_row < num_rows; ++current_row) - { - auto rc = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[current_row - 1]]); - if (!rc) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, rc.error.release()); - - ProfileEvents::increment(ProfileEvents::USearchAddCount); - ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, rc.visited_members); - ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, rc.computed_distances); - } - } - else if (const auto & column_tuple = typeid_cast(column_cut.get())) - { - const auto & column_tuple_columns = column_tuple->getColumns(); - std::vector> data(column_tuple->size(), std::vector()); - for (const auto & column : column_tuple_columns) - { - const auto & pod_array = typeid_cast(column.get())->getData(); - for (size_t i = 0; i < pod_array.size(); ++i) - data[i].push_back(pod_array[i]); - } - - if (data.empty()) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Tuple has 0 rows, {} rows expected", rows_read); - - if (!index) - index = std::make_shared>(data[0].size(), scalar_kind); - - if (!index->reserve(unum::usearch::ceil2(index->size() + data.size()))) - throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index"); - - for (const auto & item : data) - { - auto rc = index->add(static_cast(index->size()), item.data()); - if (!rc) - throw Exception::createRuntime(ErrorCodes::INCORRECT_DATA, rc.error.release()); - - ProfileEvents::increment(ProfileEvents::USearchAddCount); - ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, rc.visited_members); - ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, rc.computed_distances); - } - } - else - throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array or Tuple column"); - - *pos += rows_read; -} - -MergeTreeIndexConditionUSearch::MergeTreeIndexConditionUSearch( - const IndexDescription & /*index_description*/, - const SelectQueryInfo & query, - const String & distance_function_, 
- ContextPtr context) - : ann_condition(query, context) - , distance_function(distance_function_) -{ -} - -bool MergeTreeIndexConditionUSearch::mayBeTrueOnGranule(MergeTreeIndexGranulePtr /*idx_granule*/) const -{ - throw Exception(ErrorCodes::LOGICAL_ERROR, "mayBeTrueOnGranule is not supported for ANN skip indexes"); -} - -bool MergeTreeIndexConditionUSearch::alwaysUnknownOrTrue() const -{ - return ann_condition.alwaysUnknownOrTrue(distance_function); -} - -std::vector MergeTreeIndexConditionUSearch::getUsefulRanges(MergeTreeIndexGranulePtr idx_granule) const -{ - if (distance_function == DISTANCE_FUNCTION_L2) - return getUsefulRangesImpl(idx_granule); - else if (distance_function == DISTANCE_FUNCTION_COSINE) - return getUsefulRangesImpl(idx_granule); - std::unreachable(); -} - -template -std::vector MergeTreeIndexConditionUSearch::getUsefulRangesImpl(MergeTreeIndexGranulePtr idx_granule) const -{ - const UInt64 limit = ann_condition.getLimit(); - const UInt64 index_granularity = ann_condition.getIndexGranularity(); - const std::optional comparison_distance = ann_condition.getQueryType() == ApproximateNearestNeighborInformation::Type::Where - ? std::optional(ann_condition.getComparisonDistanceForWhereQuery()) - : std::nullopt; - - if (comparison_distance && comparison_distance.value() < 0) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to optimize query with where without distance"); - - const std::vector reference_vector = ann_condition.getReferenceVector(); - - const auto granule = std::dynamic_pointer_cast>(idx_granule); - if (granule == nullptr) - throw Exception(ErrorCodes::LOGICAL_ERROR, "Granule has the wrong type"); - - const USearchIndexWithSerializationPtr index = granule->index; - - if (ann_condition.getDimensions() != index->dimensions()) - throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) " - "does not match the dimension in the index ({})", - ann_condition.getDimensions(), index->dimensions()); - - auto result = index->search(reference_vector.data(), limit); - - ProfileEvents::increment(ProfileEvents::USearchSearchCount); - ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, result.visited_members); - ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, result.computed_distances); - - std::vector neighbors(result.size()); /// indexes of dots which were closest to the reference vector - std::vector distances(result.size()); - result.dump_to(neighbors.data(), distances.data()); - - std::vector granules; - granules.reserve(neighbors.size()); - for (size_t i = 0; i < neighbors.size(); ++i) - { - if (comparison_distance && distances[i] > comparison_distance) - continue; - granules.push_back(neighbors[i] / index_granularity); - } - - /// make unique - std::sort(granules.begin(), granules.end()); - granules.erase(std::unique(granules.begin(), granules.end()), granules.end()); - - return granules; -} - -MergeTreeIndexUSearch::MergeTreeIndexUSearch(const IndexDescription & index_, const String & distance_function_, unum::usearch::scalar_kind_t scalar_kind_) - : IMergeTreeIndex(index_) - , distance_function(distance_function_) - , scalar_kind(scalar_kind_) -{ -} - -MergeTreeIndexGranulePtr MergeTreeIndexUSearch::createIndexGranule() const -{ - if (distance_function == DISTANCE_FUNCTION_L2) - return std::make_shared>(index.name, index.sample_block, scalar_kind); - else if (distance_function == DISTANCE_FUNCTION_COSINE) - return std::make_shared>(index.name, index.sample_block, scalar_kind); - 
std::unreachable(); -} - -MergeTreeIndexAggregatorPtr MergeTreeIndexUSearch::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const -{ - if (distance_function == DISTANCE_FUNCTION_L2) - return std::make_shared>(index.name, index.sample_block, scalar_kind); - else if (distance_function == DISTANCE_FUNCTION_COSINE) - return std::make_shared>(index.name, index.sample_block, scalar_kind); - std::unreachable(); -} - -MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const -{ - return std::make_shared(index, query, distance_function, context); -}; - -MergeTreeIndexConditionPtr MergeTreeIndexUSearch::createIndexCondition(const ActionsDAG *, ContextPtr) const -{ - throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); -} - -MergeTreeIndexPtr usearchIndexCreator(const IndexDescription & index) -{ - static constexpr auto default_distance_function = DISTANCE_FUNCTION_L2; - String distance_function = default_distance_function; - if (!index.arguments.empty()) - distance_function = index.arguments[0].get(); - - static constexpr auto default_scalar_kind = unum::usearch::scalar_kind_t::f16_k; - auto scalar_kind = default_scalar_kind; - if (index.arguments.size() > 1) - scalar_kind = nameToScalarKind.at(index.arguments[1].get()); - - return std::make_shared(index, distance_function, scalar_kind); -} - -void usearchIndexValidator(const IndexDescription & index, bool /* attach */) -{ - /// Check number and type of USearch index arguments: - - if (index.arguments.size() > 2) - throw Exception(ErrorCodes::INCORRECT_QUERY, "USearch index must not have more than one parameters"); - - if (!index.arguments.empty() && index.arguments[0].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "First argument of USearch index (distance function) must be of type String"); - if (index.arguments.size() > 1 && index.arguments[1].getType() != Field::Types::String) - throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of USearch index (scalar type) must be of type String"); - - /// Check that the index is created on a single column - - if (index.column_names.size() != 1 || index.data_types.size() != 1) - throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "USearch indexes must be created on a single column"); - - /// Check that a supported metric was passed as first argument - - if (!index.arguments.empty()) - { - String distance_name = index.arguments[0].get(); - if (distance_name != DISTANCE_FUNCTION_L2 && distance_name != DISTANCE_FUNCTION_COSINE) - throw Exception(ErrorCodes::INCORRECT_DATA, "USearch index only supports distance functions '{}' and '{}'", DISTANCE_FUNCTION_L2, DISTANCE_FUNCTION_COSINE); - } - - /// Check that a supported kind was passed as a second argument - - if (index.arguments.size() > 1 && !nameToScalarKind.contains(index.arguments[1].get())) - { - String supported_kinds; - for (const auto & [name, kind] : nameToScalarKind) - { - if (!supported_kinds.empty()) - supported_kinds += ", "; - supported_kinds += name; - } - throw Exception(ErrorCodes::INCORRECT_DATA, "Unrecognized scalar kind (second argument) for USearch index. 
Supported kinds are: {}", supported_kinds); - } - - /// Check data type of indexed column: - - auto throw_unsupported_underlying_column_exception = []() - { - throw Exception( - ErrorCodes::ILLEGAL_COLUMN, - "USearch can only be created on columns of type Array(Float32) and Tuple(Float32[, Float32[, ...]])"); - }; - - DataTypePtr data_type = index.sample_block.getDataTypes()[0]; - - if (const auto * data_type_array = typeid_cast(data_type.get())) - { - TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw_unsupported_underlying_column_exception(); - } - else if (const auto * data_type_tuple = typeid_cast(data_type.get())) - { - const DataTypes & inner_types = data_type_tuple->getElements(); - for (const auto & inner_type : inner_types) - { - TypeIndex nested_type_index = inner_type->getTypeId(); - if (!WhichDataType(nested_type_index).isFloat32()) - throw_unsupported_underlying_column_exception(); - } - } - else - throw_unsupported_underlying_column_exception(); -} - -} - -#endif diff --git a/src/Storages/MergeTree/MergeTreeIndexUSearch.h b/src/Storages/MergeTree/MergeTreeIndexUSearch.h deleted file mode 100644 index 41de94402c9..00000000000 --- a/src/Storages/MergeTree/MergeTreeIndexUSearch.h +++ /dev/null @@ -1,116 +0,0 @@ -#pragma once - -#ifdef ENABLE_USEARCH - -#include - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wpass-failed" -#include -#pragma clang diagnostic pop - -namespace DB -{ - -using USearchImplType = unum::usearch::index_dense_gt; - -template -class USearchIndexWithSerialization : public USearchImplType -{ - using Base = USearchImplType; - -public: - USearchIndexWithSerialization(size_t dimensions, unum::usearch::scalar_kind_t scalar_kind); - void serialize(WriteBuffer & ostr) const; - void deserialize(ReadBuffer & istr); - size_t getDimensions() const; -}; - -template -using USearchIndexWithSerializationPtr = std::shared_ptr>; - - -template -struct MergeTreeIndexGranuleUSearch final : public IMergeTreeIndexGranule -{ - MergeTreeIndexGranuleUSearch(const String & index_name_, const Block & index_sample_block_, unum::usearch::scalar_kind_t scalar_kind_); - MergeTreeIndexGranuleUSearch(const String & index_name_, const Block & index_sample_block_, unum::usearch::scalar_kind_t scalar_kind_, USearchIndexWithSerializationPtr index_); - - ~MergeTreeIndexGranuleUSearch() override = default; - - void serializeBinary(WriteBuffer & ostr) const override; - void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; - - bool empty() const override { return !index.get(); } - - const String index_name; - const Block index_sample_block; - const unum::usearch::scalar_kind_t scalar_kind; - USearchIndexWithSerializationPtr index; -}; - - -template -struct MergeTreeIndexAggregatorUSearch final : IMergeTreeIndexAggregator -{ - MergeTreeIndexAggregatorUSearch(const String & index_name_, const Block & index_sample_block, unum::usearch::scalar_kind_t scalar_kind_); - ~MergeTreeIndexAggregatorUSearch() override = default; - - bool empty() const override { return !index || index->size() == 0; } - MergeTreeIndexGranulePtr getGranuleAndReset() override; - void update(const Block & block, size_t * pos, size_t limit) override; - - const String index_name; - const Block index_sample_block; - const unum::usearch::scalar_kind_t scalar_kind; - USearchIndexWithSerializationPtr index; -}; - - -class MergeTreeIndexConditionUSearch final : public 
IMergeTreeIndexConditionApproximateNearestNeighbor -{ -public: - MergeTreeIndexConditionUSearch( - const IndexDescription & index_description, - const SelectQueryInfo & query, - const String & distance_function, - ContextPtr context); - - ~MergeTreeIndexConditionUSearch() override = default; - - bool alwaysUnknownOrTrue() const override; - bool mayBeTrueOnGranule(MergeTreeIndexGranulePtr idx_granule) const override; - std::vector getUsefulRanges(MergeTreeIndexGranulePtr idx_granule) const override; - -private: - template - std::vector getUsefulRangesImpl(MergeTreeIndexGranulePtr idx_granule) const; - - const ApproximateNearestNeighborCondition ann_condition; - const String distance_function; -}; - - -class MergeTreeIndexUSearch : public IMergeTreeIndex -{ -public: - MergeTreeIndexUSearch(const IndexDescription & index_, const String & distance_function_, unum::usearch::scalar_kind_t scalar_kind_); - - ~MergeTreeIndexUSearch() override = default; - - MergeTreeIndexGranulePtr createIndexGranule() const override; - MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; - MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const; - MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; - bool isVectorSearch() const override { return true; } - -private: - const String distance_function; - const unum::usearch::scalar_kind_t scalar_kind; -}; - -} - - -#endif - diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp new file mode 100644 index 00000000000..fbbc66bd8db --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.cpp @@ -0,0 +1,520 @@ +#include + +#if USE_USEARCH + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ProfileEvents +{ + extern const Event USearchAddCount; + extern const Event USearchAddVisitedMembers; + extern const Event USearchAddComputedDistances; + extern const Event USearchSearchCount; + extern const Event USearchSearchVisitedMembers; + extern const Event USearchSearchComputedDistances; +} + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int CANNOT_ALLOCATE_MEMORY; + extern const int FORMAT_VERSION_TOO_OLD; + extern const int ILLEGAL_COLUMN; + extern const int INCORRECT_DATA; + extern const int INCORRECT_NUMBER_OF_COLUMNS; + extern const int INCORRECT_QUERY; + extern const int LOGICAL_ERROR; + extern const int NOT_IMPLEMENTED; +} + +namespace +{ + +/// The only indexing method currently supported by USearch +const std::set methods = {"hnsw"}; + +/// Maps from user-facing name to internal name +const std::unordered_map distanceFunctionToMetricKind = { + {"L2Distance", unum::usearch::metric_kind_t::l2sq_k}, + {"cosineDistance", unum::usearch::metric_kind_t::cos_k}}; + +/// Maps from user-facing name to internal name +const std::unordered_map quantizationToScalarKind = { + {"f32", unum::usearch::scalar_kind_t::f32_k}, + {"f16", unum::usearch::scalar_kind_t::f16_k}, + {"i8", unum::usearch::scalar_kind_t::i8_k}}; + +template +concept is_set = std::same_as>; + +template +concept is_unordered_map = std::same_as>; + +template +String joinByComma(const T & t) +{ + if constexpr (is_set) + { + return fmt::format("{}", fmt::join(t, ", ")); + } + else if constexpr (is_unordered_map) + { + String joined_keys; + for (const auto & [k, _] : t) + { + if (!joined_keys.empty()) 
+ joined_keys += ", "; + joined_keys += k; + } + return joined_keys; + } + /// TODO once our libcxx is recent enough, replace above by + /// return fmt::format("{}", fmt::join(std::views::keys(t)), ", ")); + std::unreachable(); +} + +} + +USearchIndexWithSerialization::USearchIndexWithSerialization( + size_t dimensions, + unum::usearch::metric_kind_t metric_kind, + unum::usearch::scalar_kind_t scalar_kind, + UsearchHnswParams usearch_hnsw_params) +{ + USearchIndex::metric_t metric(dimensions, metric_kind, scalar_kind); + + unum::usearch::index_dense_config_t config(usearch_hnsw_params.m, usearch_hnsw_params.ef_construction, usearch_hnsw_params.ef_search); + config.enable_key_lookups = false; /// we don't do row-to-vector lookups + + if (auto error = config.validate(); error) /// already called in vectorSimilarityIndexValidator, call again because usearch may change the config in-place + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. Error: {}", String(error.release())); + + if (auto result = USearchIndex::make(metric, config); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not create vector similarity index. Error: {}", String(result.error.release())); + else + swap(result.index); +} + +void USearchIndexWithSerialization::serialize(WriteBuffer & ostr) const +{ + auto callback = [&ostr](void * from, size_t n) + { + ostr.write(reinterpret_cast(from), n); + return true; + }; + + if (auto result = Base::save_to_stream(callback); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not save vector similarity index. Error: {}", String(result.error.release())); +} + +void USearchIndexWithSerialization::deserialize(ReadBuffer & istr) +{ + auto callback = [&istr](void * from, size_t n) + { + istr.readStrict(reinterpret_cast(from), n); + return true; + }; + + if (auto result = Base::load_from_stream(callback); !result) + /// See the comment in MergeTreeIndexGranuleVectorSimilarity::deserializeBinary why we throw here + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not load vector similarity index. Please drop the index and create it again. 
Error: {}", String(result.error.release())); + + if (!try_reserve(limits())) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for usearch index"); +} + +USearchIndexWithSerialization::Statistics USearchIndexWithSerialization::getStatistics() const +{ + USearchIndex::stats_t global_stats = Base::stats(); + + Statistics statistics = { + .max_level = max_level(), + .connectivity = connectivity(), + .size = size(), + .capacity = capacity(), + .memory_usage = memory_usage(), + .bytes_per_vector = bytes_per_vector(), + .scalar_words = scalar_words(), + .nodes = global_stats.nodes, + .edges = global_stats.edges, + .max_edges = global_stats.max_edges, + .level_stats = {}}; + + for (size_t i = 0; i < statistics.max_level; ++i) + statistics.level_stats.push_back(Base::stats(i)); + + return statistics; +} + +String USearchIndexWithSerialization::Statistics::toString() const +{ + return fmt::format("max_level = {}, connectivity = {}, size = {}, capacity = {}, memory_usage = {}, bytes_per_vector = {}, scalar_words = {}, nodes = {}, edges = {}, max_edges = {}", + max_level, connectivity, size, capacity, ReadableSize(memory_usage), bytes_per_vector, scalar_words, nodes, edges, max_edges); + +} +MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( + const String & index_name_, + const Block & index_sample_block_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_) + : MergeTreeIndexGranuleVectorSimilarity(index_name_, index_sample_block_, metric_kind_, scalar_kind_, usearch_hnsw_params_, nullptr) +{ +} + +MergeTreeIndexGranuleVectorSimilarity::MergeTreeIndexGranuleVectorSimilarity( + const String & index_name_, + const Block & index_sample_block_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_, + USearchIndexWithSerializationPtr index_) + : index_name(index_name_) + , index_sample_block(index_sample_block_) + , metric_kind(metric_kind_) + , scalar_kind(scalar_kind_) + , usearch_hnsw_params(usearch_hnsw_params_) + , index(std::move(index_)) +{ +} + +void MergeTreeIndexGranuleVectorSimilarity::serializeBinary(WriteBuffer & ostr) const +{ + if (empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Attempt to write empty minmax index {}", backQuote(index_name)); + + writeIntBinary(FILE_FORMAT_VERSION, ostr); + + /// Number of dimensions is required in the index constructor, + /// so it must be written and read separately from the other part + writeIntBinary(static_cast(index->dimensions()), ostr); + + index->serialize(ostr); + + auto statistics = index->getStatistics(); + LOG_TRACE(logger, "Wrote vector similarity index: {}", statistics.toString()); +} + +void MergeTreeIndexGranuleVectorSimilarity::deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion /*version*/) +{ + UInt64 file_version; + readIntBinary(file_version, istr); + if (file_version != FILE_FORMAT_VERSION) + throw Exception( + ErrorCodes::FORMAT_VERSION_TOO_OLD, + "Vector similarity index could not be loaded because its version is too old (current version: {}, persisted version: {}). Please drop the index and create it again.", + FILE_FORMAT_VERSION, file_version); + /// More fancy error handling would be: Set a flag on the index that it failed to load. During usage return all granules, i.e. + /// behave as if the index does not exist. 
Since format changes are expected to happen only rarely and it is "only" an index, keep it simple for now. + + UInt64 dimension; + readIntBinary(dimension, istr); + index = std::make_shared(dimension, metric_kind, scalar_kind, usearch_hnsw_params); + + index->deserialize(istr); + + auto statistics = index->getStatistics(); + LOG_TRACE(logger, "Loaded vector similarity index: {}", statistics.toString()); +} + +MergeTreeIndexAggregatorVectorSimilarity::MergeTreeIndexAggregatorVectorSimilarity( + const String & index_name_, + const Block & index_sample_block_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_) + : index_name(index_name_) + , index_sample_block(index_sample_block_) + , metric_kind(metric_kind_) + , scalar_kind(scalar_kind_) + , usearch_hnsw_params(usearch_hnsw_params_) +{ +} + +MergeTreeIndexGranulePtr MergeTreeIndexAggregatorVectorSimilarity::getGranuleAndReset() +{ + auto granule = std::make_shared(index_name, index_sample_block, metric_kind, scalar_kind, usearch_hnsw_params, index); + index = nullptr; + return granule; +} + +void MergeTreeIndexAggregatorVectorSimilarity::update(const Block & block, size_t * pos, size_t limit) +{ + if (*pos >= block.rows()) + throw Exception( + ErrorCodes::LOGICAL_ERROR, + "The provided position is not less than the number of block rows. Position: {}, Block rows: {}.", + *pos, + block.rows()); + + size_t rows_read = std::min(limit, block.rows() - *pos); + + if (rows_read == 0) + return; + + if (rows_read > std::numeric_limits::max()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Index granularity is too big: more than {} rows per index granule.", std::numeric_limits::max()); + + if (index_sample_block.columns() > 1) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected block with single column"); + + const String & index_column_name = index_sample_block.getByPosition(0).name; + ColumnPtr column_cut = block.getByName(index_column_name).column->cut(*pos, rows_read); + + if (const auto & column_array = typeid_cast(column_cut.get())) + { + const auto & column_array_data = column_array->getData(); + const auto & column_array_data_float = typeid_cast(column_array_data); + const auto & column_array_data_float_data = column_array_data_float.getData(); + + const auto & column_array_offsets = column_array->getOffsets(); + const size_t num_rows = column_array_offsets.size(); + + if (column_array->empty()) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Array is unexpectedly empty"); + + /// The vector similarity algorithm naturally assumes that the indexed vectors have dimension >= 1. This condition is violated if empty arrays + /// are INSERTed into an vector-similarity-indexed column or if no value was specified at all in which case the arrays take on their default + /// values which is also empty. + if (column_array->isDefaultAt(0)) + throw Exception(ErrorCodes::INCORRECT_DATA, "The arrays in column '{}' must not be empty. Did you try to INSERT default values?", index_column_name); + + /// Check all sizes are the same + const size_t dimensions = column_array_offsets[0]; + for (size_t i = 0; i < num_rows - 1; ++i) + if (column_array_offsets[i + 1] - column_array_offsets[i] != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); + + /// Also check that previously inserted blocks have the same size as this block. + /// Note that this guarantees consistency of dimension only within parts. 
We are unable to detect inconsistent dimensions across + /// parts - for this, a little help from the user is needed, e.g. CONSTRAINT cnstr CHECK length(array) = 42. + if (index && index->dimensions() != dimensions) + throw Exception(ErrorCodes::INCORRECT_DATA, "All arrays in column '{}' must have equal length", index_column_name); + + if (!index) + index = std::make_shared(dimensions, metric_kind, scalar_kind, usearch_hnsw_params); + + /// We use Usearch's index_dense_t as index type which supports only 4 bio entries according to https://github.com/unum-cloud/usearch/tree/main/cpp + if (index->size() + num_rows > std::numeric_limits::max()) + throw Exception(ErrorCodes::INCORRECT_DATA, "Size of vector similarity index in column {} would exceed 4 billion entries", index_column_name); + + /// Reserving space is mandatory + if (!index->try_reserve(roundUpToPowerOfTwoOrZero(index->size() + num_rows))) + throw Exception(ErrorCodes::CANNOT_ALLOCATE_MEMORY, "Could not reserve memory for vector similarity index"); + + for (size_t row = 0; row < num_rows; ++row) + { + if (auto result = index->add(static_cast(index->size()), &column_array_data_float_data[column_array_offsets[row - 1]]); !result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not add data to vector similarity index. Error: {}", String(result.error.release())); + else + { + ProfileEvents::increment(ProfileEvents::USearchAddCount); + ProfileEvents::increment(ProfileEvents::USearchAddVisitedMembers, result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchAddComputedDistances, result.computed_distances); + } + } + } + else + throw Exception(ErrorCodes::LOGICAL_ERROR, "Expected Array(Float32) column"); + + *pos += rows_read; +} + +MergeTreeIndexConditionVectorSimilarity::MergeTreeIndexConditionVectorSimilarity( + const IndexDescription & /*index_description*/, + const SelectQueryInfo & query, + unum::usearch::metric_kind_t metric_kind_, + ContextPtr context) + : vector_similarity_condition(query, context) + , metric_kind(metric_kind_) +{ +} + +bool MergeTreeIndexConditionVectorSimilarity::mayBeTrueOnGranule(MergeTreeIndexGranulePtr) const +{ + throw Exception(ErrorCodes::LOGICAL_ERROR, "mayBeTrueOnGranule is not supported for ANN skip indexes"); +} + +bool MergeTreeIndexConditionVectorSimilarity::alwaysUnknownOrTrue() const +{ + String index_distance_function; + switch (metric_kind) + { + case unum::usearch::metric_kind_t::l2sq_k: index_distance_function = "L2Distance"; break; + case unum::usearch::metric_kind_t::cos_k: index_distance_function = "cosineDistance"; break; + default: std::unreachable(); + } + return vector_similarity_condition.alwaysUnknownOrTrue(index_distance_function); +} + +std::vector MergeTreeIndexConditionVectorSimilarity::getUsefulRanges(MergeTreeIndexGranulePtr granule_) const +{ + const UInt64 limit = vector_similarity_condition.getLimit(); + const UInt64 index_granularity = vector_similarity_condition.getIndexGranularity(); + + const auto granule = std::dynamic_pointer_cast(granule_); + if (granule == nullptr) + throw Exception(ErrorCodes::LOGICAL_ERROR, "Granule has the wrong type"); + + const USearchIndexWithSerializationPtr index = granule->index; + + if (vector_similarity_condition.getDimensions() != index->dimensions()) + throw Exception(ErrorCodes::INCORRECT_QUERY, "The dimension of the space in the request ({}) " + "does not match the dimension in the index ({})", + vector_similarity_condition.getDimensions(), index->dimensions()); + + const std::vector reference_vector = 
vector_similarity_condition.getReferenceVector(); + + auto search_result = index->search(reference_vector.data(), limit); + if (!search_result) + throw Exception(ErrorCodes::INCORRECT_DATA, "Could not search in vector similarity index. Error: {}", String(search_result.error.release())); + + ProfileEvents::increment(ProfileEvents::USearchSearchCount); + ProfileEvents::increment(ProfileEvents::USearchSearchVisitedMembers, search_result.visited_members); + ProfileEvents::increment(ProfileEvents::USearchSearchComputedDistances, search_result.computed_distances); + + std::vector neighbors(search_result.size()); /// indexes of vectors which were closest to the reference vector + search_result.dump_to(neighbors.data()); + + std::vector granules; + granules.reserve(neighbors.size()); + for (auto neighbor : neighbors) + granules.push_back(neighbor / index_granularity); + + /// make unique + std::sort(granules.begin(), granules.end()); + granules.erase(std::unique(granules.begin(), granules.end()), granules.end()); + + return granules; +} + +MergeTreeIndexVectorSimilarity::MergeTreeIndexVectorSimilarity( + const IndexDescription & index_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_) + : IMergeTreeIndex(index_) + , metric_kind(metric_kind_) + , scalar_kind(scalar_kind_) + , usearch_hnsw_params(usearch_hnsw_params_) +{ +} + +MergeTreeIndexGranulePtr MergeTreeIndexVectorSimilarity::createIndexGranule() const +{ + return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind, usearch_hnsw_params); +} + +MergeTreeIndexAggregatorPtr MergeTreeIndexVectorSimilarity::createIndexAggregator(const MergeTreeWriterSettings & /*settings*/) const +{ + return std::make_shared(index.name, index.sample_block, metric_kind, scalar_kind, usearch_hnsw_params); +} + +MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const +{ + return std::make_shared(index, query, metric_kind, context); +}; + +MergeTreeIndexConditionPtr MergeTreeIndexVectorSimilarity::createIndexCondition(const ActionsDAG *, ContextPtr) const +{ + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "MergeTreeIndexAnnoy cannot be created with ActionsDAG"); +} + +MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index) +{ + /// Default parameters: + unum::usearch::metric_kind_t metric_kind = distanceFunctionToMetricKind.at(index.arguments[1].safeGet()); + unum::usearch::scalar_kind_t scalar_kind = unum::usearch::scalar_kind_t::f32_k; + UsearchHnswParams usearch_hnsw_params; + + /// Optional parameters: + const bool has_six_args = (index.arguments.size() == 6); + if (has_six_args) + { + scalar_kind = quantizationToScalarKind.at(index.arguments[2].safeGet()); + usearch_hnsw_params = {.m = index.arguments[3].safeGet(), + .ef_construction = index.arguments[4].safeGet(), + .ef_search = index.arguments[5].safeGet()}; + } + + return std::make_shared(index, metric_kind, scalar_kind, usearch_hnsw_params); +} + +void vectorSimilarityIndexValidator(const IndexDescription & index, bool /* attach */) +{ + const bool has_two_args = (index.arguments.size() == 2); + const bool has_six_args = (index.arguments.size() == 6); + + /// Check number and type of arguments + if (!has_two_args && !has_six_args) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Vector similarity index must have two or six arguments"); + if (index.arguments[0].getType() != Field::Types::String) + throw 
Exception(ErrorCodes::INCORRECT_QUERY, "First argument of vector similarity index (method) must be of type String"); + if (index.arguments[1].getType() != Field::Types::String) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Second argument of vector similarity index (metric) must be of type String"); + if (has_six_args) + { + if (index.arguments[2].getType() != Field::Types::String) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Third argument of vector similarity index (quantization) must be of type String"); + if (index.arguments[3].getType() != Field::Types::UInt64) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Fourth argument of vector similarity index (M) must be of type UInt64"); + if (index.arguments[4].getType() != Field::Types::UInt64) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Fifth argument of vector similarity index (ef_construction) must be of type UInt64"); + if (index.arguments[5].getType() != Field::Types::UInt64) + throw Exception(ErrorCodes::INCORRECT_QUERY, "Sixth argument of vector similarity index (ef_search) must be of type UInt64"); + } + + /// Check that passed arguments are supported + if (!methods.contains(index.arguments[0].safeGet())) + throw Exception(ErrorCodes::INCORRECT_DATA, "First argument (method) of vector similarity index is not supported. Supported methods are: {}", joinByComma(methods)); + if (!distanceFunctionToMetricKind.contains(index.arguments[1].safeGet())) + throw Exception(ErrorCodes::INCORRECT_DATA, "Second argument (distance function) of vector similarity index is not supported. Supported distance functions are: {}", joinByComma(distanceFunctionToMetricKind)); + if (has_six_args) + { + if (!quantizationToScalarKind.contains(index.arguments[2].safeGet())) + throw Exception(ErrorCodes::INCORRECT_DATA, "Third argument (quantization) of vector similarity index is not supported. Supported quantizations are: {}", joinByComma(quantizationToScalarKind)); + + /// Call USearch's own parameter validation method for HNSW-specific parameters + UInt64 m = index.arguments[3].safeGet(); + UInt64 ef_construction = index.arguments[4].safeGet(); + UInt64 ef_search = index.arguments[5].safeGet(); + + unum::usearch::index_dense_config_t config(m, ef_construction, ef_search); + + if (auto error = config.validate(); error) + throw Exception(ErrorCodes::INCORRECT_DATA, "Invalid parameters passed to vector similarity index. 
Error: {}", String(error.release())); + } + + /// Check that the index is created on a single column + if (index.column_names.size() != 1 || index.data_types.size() != 1) + throw Exception(ErrorCodes::INCORRECT_NUMBER_OF_COLUMNS, "Vector similarity indexes must be created on a single column"); + + /// Check data type of the indexed column: + DataTypePtr data_type = index.sample_block.getDataTypes()[0]; + if (const auto * data_type_array = typeid_cast(data_type.get())) + { + TypeIndex nested_type_index = data_type_array->getNestedType()->getTypeId(); + if (!WhichDataType(nested_type_index).isFloat32()) + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); + } + else + { + throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Vector similarity indexes can only be created on columns of type Array(Float32)"); + } +} + +} + +#endif diff --git a/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h new file mode 100644 index 00000000000..c4c03254d2d --- /dev/null +++ b/src/Storages/MergeTree/MergeTreeIndexVectorSimilarity.h @@ -0,0 +1,174 @@ +#pragma once + +#include "config.h" + +#if USE_USEARCH + +#include +#include +#include + +namespace DB +{ + +struct UsearchHnswParams +{ + size_t m = unum::usearch::default_connectivity(); + size_t ef_construction = unum::usearch::default_expansion_add(); + size_t ef_search = unum::usearch::default_expansion_search(); +}; + +using USearchIndex = unum::usearch::index_dense_t; + +class USearchIndexWithSerialization : public USearchIndex +{ + using Base = USearchIndex; + +public: + USearchIndexWithSerialization( + size_t dimensions, + unum::usearch::metric_kind_t metric_kind, + unum::usearch::scalar_kind_t scalar_kind, + UsearchHnswParams usearch_hnsw_params); + + void serialize(WriteBuffer & ostr) const; + void deserialize(ReadBuffer & istr); + + struct Statistics + { + size_t max_level; + size_t connectivity; + size_t size; /// number of indexed vectors + size_t capacity; /// reserved number of indexed vectors + size_t memory_usage; /// byte size (not exact) + size_t bytes_per_vector; + size_t scalar_words; + size_t nodes; + size_t edges; + size_t max_edges; + + std::vector level_stats; /// for debugging, excluded from getStatistics() + + String toString() const; + }; + + Statistics getStatistics() const; +}; + +using USearchIndexWithSerializationPtr = std::shared_ptr; + + +struct MergeTreeIndexGranuleVectorSimilarity final : public IMergeTreeIndexGranule +{ + MergeTreeIndexGranuleVectorSimilarity( + const String & index_name_, + const Block & index_sample_block_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_); + + MergeTreeIndexGranuleVectorSimilarity( + const String & index_name_, + const Block & index_sample_block_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_, + USearchIndexWithSerializationPtr index_); + + ~MergeTreeIndexGranuleVectorSimilarity() override = default; + + void serializeBinary(WriteBuffer & ostr) const override; + void deserializeBinary(ReadBuffer & istr, MergeTreeIndexVersion version) override; + + bool empty() const override { return !index || index->size() == 0; } + + const String index_name; + const Block index_sample_block; + const unum::usearch::metric_kind_t metric_kind; + const unum::usearch::scalar_kind_t scalar_kind; + const 
UsearchHnswParams usearch_hnsw_params; + USearchIndexWithSerializationPtr index; + + LoggerPtr logger = getLogger("VectorSimilarityIndex"); + +private: + /// The version of the persistence format of USearch index. Increment whenever you change the format. + /// Note: USearch prefixes the serialized data with its own version header. We can't rely on that because 1. the index in ClickHouse + /// is (at least in theory) agnostic of specific vector search libraries, and 2. additional data (e.g. the number of dimensions) + /// outside USearch exists which we should version separately. + static constexpr UInt64 FILE_FORMAT_VERSION = 1; +}; + + +struct MergeTreeIndexAggregatorVectorSimilarity final : IMergeTreeIndexAggregator +{ + MergeTreeIndexAggregatorVectorSimilarity( + const String & index_name_, + const Block & index_sample_block, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_); + + ~MergeTreeIndexAggregatorVectorSimilarity() override = default; + + bool empty() const override { return !index || index->size() == 0; } + MergeTreeIndexGranulePtr getGranuleAndReset() override; + void update(const Block & block, size_t * pos, size_t limit) override; + + const String index_name; + const Block index_sample_block; + const unum::usearch::metric_kind_t metric_kind; + const unum::usearch::scalar_kind_t scalar_kind; + const UsearchHnswParams usearch_hnsw_params; + USearchIndexWithSerializationPtr index; +}; + + +class MergeTreeIndexConditionVectorSimilarity final : public IMergeTreeIndexCondition +{ +public: + MergeTreeIndexConditionVectorSimilarity( + const IndexDescription & index_description, + const SelectQueryInfo & query, + unum::usearch::metric_kind_t metric_kind_, + ContextPtr context); + + ~MergeTreeIndexConditionVectorSimilarity() override = default; + + bool alwaysUnknownOrTrue() const override; + bool mayBeTrueOnGranule(MergeTreeIndexGranulePtr granule) const override; + std::vector getUsefulRanges(MergeTreeIndexGranulePtr granule) const override; + +private: + const VectorSimilarityCondition vector_similarity_condition; + const unum::usearch::metric_kind_t metric_kind; +}; + + +class MergeTreeIndexVectorSimilarity : public IMergeTreeIndex +{ +public: + MergeTreeIndexVectorSimilarity( + const IndexDescription & index_, + unum::usearch::metric_kind_t metric_kind_, + unum::usearch::scalar_kind_t scalar_kind_, + UsearchHnswParams usearch_hnsw_params_); + + ~MergeTreeIndexVectorSimilarity() override = default; + + MergeTreeIndexGranulePtr createIndexGranule() const override; + MergeTreeIndexAggregatorPtr createIndexAggregator(const MergeTreeWriterSettings & settings) const override; + MergeTreeIndexConditionPtr createIndexCondition(const SelectQueryInfo & query, ContextPtr context) const; + MergeTreeIndexConditionPtr createIndexCondition(const ActionsDAG *, ContextPtr) const override; + bool isVectorSimilarityIndex() const override { return true; } + +private: + const unum::usearch::metric_kind_t metric_kind; + const unum::usearch::scalar_kind_t scalar_kind; + const UsearchHnswParams usearch_hnsw_params; +}; + +} + + +#endif diff --git a/src/Storages/MergeTree/MergeTreeIndices.cpp b/src/Storages/MergeTree/MergeTreeIndices.cpp index bded961db8e..d2fc0e84b56 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.cpp +++ b/src/Storages/MergeTree/MergeTreeIndices.cpp @@ -127,15 +127,21 @@ MergeTreeIndexFactory::MergeTreeIndexFactory() registerCreator("hypothesis", hypothesisIndexCreator); 
registerValidator("hypothesis", hypothesisIndexValidator); -#ifdef ENABLE_ANNOY - registerCreator("annoy", annoyIndexCreator); - registerValidator("annoy", annoyIndexValidator); -#endif -#ifdef ENABLE_USEARCH - registerCreator("usearch", usearchIndexCreator); - registerValidator("usearch", usearchIndexValidator); +#if USE_USEARCH + registerCreator("vector_similarity", vectorSimilarityIndexCreator); + registerValidator("vector_similarity", vectorSimilarityIndexValidator); #endif + /// ------ + /// TODO: remove this block at the end of 2024. + /// Index types 'annoy' and 'usearch' are no longer supported as of June 2024. Their successor is index type 'vector_similarity'. + /// To support loading tables with old indexes during a transition period, register dummy indexes which allow load/attaching but + /// throw an exception when the user attempts to use them. + registerCreator("annoy", legacyVectorSimilarityIndexCreator); + registerValidator("annoy", legacyVectorSimilarityIndexValidator); + registerCreator("usearch", legacyVectorSimilarityIndexCreator); + registerValidator("usearch", legacyVectorSimilarityIndexValidator); + /// ------ registerCreator("inverted", fullTextIndexCreator); registerValidator("inverted", fullTextIndexValidator); diff --git a/src/Storages/MergeTree/MergeTreeIndices.h b/src/Storages/MergeTree/MergeTreeIndices.h index 1be73e1c811..c52d7ffe131 100644 --- a/src/Storages/MergeTree/MergeTreeIndices.h +++ b/src/Storages/MergeTree/MergeTreeIndices.h @@ -15,6 +15,7 @@ #include #include +#include "config.h" constexpr auto INDEX_FILE_PREFIX = "skp_idx_"; @@ -92,6 +93,13 @@ public: virtual bool alwaysUnknownOrTrue() const = 0; virtual bool mayBeTrueOnGranule(MergeTreeIndexGranulePtr granule) const = 0; + + /// Special stuff for vector similarity indexes + /// - Returns vector of indexes of ranges in granule which are useful for query. 
+ virtual std::vector getUsefulRanges(MergeTreeIndexGranulePtr) const + { + throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Not implemented for non-vector-similarity indexes."); + } }; using MergeTreeIndexConditionPtr = std::shared_ptr; @@ -169,7 +177,7 @@ struct IMergeTreeIndex virtual MergeTreeIndexConditionPtr createIndexCondition( const ActionsDAG * filter_actions_dag, ContextPtr context) const = 0; - virtual bool isVectorSearch() const { return false; } + virtual bool isVectorSimilarityIndex() const { return false; } virtual MergeTreeIndexMergedConditionPtr createIndexMergedCondition( const SelectQueryInfo & /*query_info*/, StorageMetadataPtr /*storage_metadata*/) const @@ -230,17 +238,15 @@ void bloomFilterIndexValidator(const IndexDescription & index, bool attach); MergeTreeIndexPtr hypothesisIndexCreator(const IndexDescription & index); void hypothesisIndexValidator(const IndexDescription & index, bool attach); -#ifdef ENABLE_ANNOY -MergeTreeIndexPtr annoyIndexCreator(const IndexDescription & index); -void annoyIndexValidator(const IndexDescription & index, bool attach); +#if USE_USEARCH +MergeTreeIndexPtr vectorSimilarityIndexCreator(const IndexDescription & index); +void vectorSimilarityIndexValidator(const IndexDescription & index, bool attach); #endif -#ifdef ENABLE_USEARCH -MergeTreeIndexPtr usearchIndexCreator(const IndexDescription& index); -void usearchIndexValidator(const IndexDescription& index, bool attach); -#endif +MergeTreeIndexPtr legacyVectorSimilarityIndexCreator(const IndexDescription & index); +void legacyVectorSimilarityIndexValidator(const IndexDescription & index, bool attach); -MergeTreeIndexPtr fullTextIndexCreator(const IndexDescription& index); -void fullTextIndexValidator(const IndexDescription& index, bool attach); +MergeTreeIndexPtr fullTextIndexCreator(const IndexDescription & index); +void fullTextIndexValidator(const IndexDescription & index, bool attach); } diff --git a/src/Storages/MergeTree/MergeTreePartition.cpp b/src/Storages/MergeTree/MergeTreePartition.cpp index b240f80ee13..5b5bc244f92 100644 --- a/src/Storages/MergeTree/MergeTreePartition.cpp +++ b/src/Storages/MergeTree/MergeTreePartition.cpp @@ -241,7 +241,7 @@ String MergeTreePartition::getID(const Block & partition_key_sample) const if (typeid_cast(partition_key_sample.getByPosition(i).type.get())) result += toString(DateLUT::serverTimezoneInstance().toNumYYYYMMDD(DayNum(value[i].safeGet()))); else if (typeid_cast(partition_key_sample.getByPosition(i).type.get())) - result += toString(value[i].get().toUnderType()); + result += toString(value[i].safeGet().toUnderType()); else result += applyVisitor(to_string_visitor, value[i]); diff --git a/src/Storages/MergeTree/MergeTreeSettings.h b/src/Storages/MergeTree/MergeTreeSettings.h index 5ba1988cc5d..d6d56272d7b 100644 --- a/src/Storages/MergeTree/MergeTreeSettings.h +++ b/src/Storages/MergeTree/MergeTreeSettings.h @@ -215,6 +215,7 @@ struct Settings; M(Float, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns, 0.9f, "If the value of a column of the primary key in data part changes at least in this ratio of times, skip loading next columns in memory. This allows to save memory usage by not loading useless columns of the primary key.", 0) \ /** Projection settings. 
*/ \ M(UInt64, max_projections, 25, "The maximum number of merge tree projections.", 0) \ + M(LightweightMutationProjectionMode, lightweight_mutation_projection_mode, LightweightMutationProjectionMode::THROW, "When lightweight delete happens on a table with projection(s), the possible operations include throw the exception as projection exists, or drop projections of this table's relevant parts, or rebuild the projections.", 0) \ M(DeduplicateMergeProjectionMode, deduplicate_merge_projection_mode, DeduplicateMergeProjectionMode::THROW, "Whether to allow create projection for the table with non-classic MergeTree, if allowed, what is the action when merge, drop or rebuild.", 0) \ #define MAKE_OBSOLETE_MERGE_TREE_SETTING(M, TYPE, NAME, DEFAULT) \ diff --git a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp index 36ff6c0a4bd..9c82817e8cb 100644 --- a/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp +++ b/src/Storages/MergeTree/MergeTreeSplitPrewhereIntoReadSteps.cpp @@ -152,23 +152,15 @@ const ActionsDAG::Node & addFunction( const ActionsDAG::Node & addCast( const ActionsDAGPtr & dag, const ActionsDAG::Node & node_to_cast, - const String & type_name, + const DataTypePtr & to_type, OriginalToNewNodeMap & node_remap) { - if (node_to_cast.result_type->getName() == type_name) + if (!node_to_cast.result_type->equals(*to_type)) return node_to_cast; - Field cast_type_constant_value(type_name); - - ColumnWithTypeAndName column; - column.column = DataTypeString().createColumnConst(0, cast_type_constant_value); - column.type = std::make_shared(); - - const auto * cast_type_constant_node = &dag->addColumn(std::move(column)); - ActionsDAG::NodeRawConstPtrs children = {&node_to_cast, cast_type_constant_node}; - FunctionOverloadResolverPtr func_builder_cast = createInternalCastOverloadResolver(CastType::nonAccurate, {}); - - return addFunction(dag, func_builder_cast, std::move(children), node_remap); + const auto & new_node = dag->addCast(node_to_cast, to_type, {}); + node_remap[new_node.result_name] = {dag.get(), &new_node}; + return new_node; } /// Normalizes the filter node by adding AND with a constant true. 
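A minimal sketch of the idea behind the reworked addCast(): skip the cast when the node already has the target type, otherwise add one CAST node and remember it in the name-to-node remap. The DAG and type classes below are hypothetical placeholders, not the real ActionsDAG API.

#include <map>
#include <string>

/// Hypothetical, simplified stand-ins for DAG nodes and data types (illustrative only).
struct TypeSketch { std::string name; bool equals(const TypeSketch & other) const { return name == other.name; } };
struct NodeSketch { std::string result_name; TypeSketch result_type; };

struct DagSketch
{
    std::map<std::string, NodeSketch> nodes;

    /// Pretend to add a CAST node and return a reference to it.
    const NodeSketch & addCast(const NodeSketch & node, const TypeSketch & to_type)
    {
        NodeSketch casted{"CAST(" + node.result_name + ", '" + to_type.name + "')", to_type};
        return nodes.emplace(casted.result_name, casted).first->second;
    }
};

/// Add a cast only when the type actually differs, and record the new node in the remap.
const NodeSketch & addCastIfNeeded(DagSketch & dag, const NodeSketch & node, const TypeSketch & to_type,
                                   std::map<std::string, const NodeSketch *> & node_remap)
{
    if (node.result_type.equals(to_type))
        return node;
    const NodeSketch & new_node = dag.addCast(node, to_type);
    node_remap[new_node.result_name] = &new_node;
    return new_node;
}

Passing a DataTypePtr instead of a type-name string lets the DAG create the cast directly, instead of first materializing a constant String column and resolving a cast function by hand.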
@@ -332,7 +324,7 @@ bool tryBuildPrewhereSteps(PrewhereInfoPtr prewhere_info, const ExpressionAction /// Build AND(last_step_result_node, true) const auto & and_node = addAndTrue(last_step_dag, *last_step_result_node_info.node, node_remap); /// Build CAST(and_node, type of PREWHERE column) - const auto & cast_node = addCast(last_step_dag, and_node, output->result_type->getName(), node_remap); + const auto & cast_node = addCast(last_step_dag, and_node, output->result_type, node_remap); /// Add alias for the result with the name of the PREWHERE column const auto & prewhere_result_node = last_step_dag->addAlias(cast_node, output->result_name); last_step_dag->addOrReplaceInOutputs(prewhere_result_node); diff --git a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp index 43c40dee77d..f0c26c302e1 100644 --- a/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp +++ b/src/Storages/MergeTree/MergeTreeWhereOptimizer.cpp @@ -222,17 +222,17 @@ static bool isConditionGood(const RPNBuilderTreeNode & condition, const NameSet /// check the value with respect to threshold if (type == Field::Types::UInt64) { - const auto value = output_value.get(); + const auto value = output_value.safeGet(); return value > threshold; } else if (type == Field::Types::Int64) { - const auto value = output_value.get(); + const auto value = output_value.safeGet(); return value < -threshold || threshold < value; } else if (type == Field::Types::Float64) { - const auto value = output_value.get(); + const auto value = output_value.safeGet(); return value < -threshold || threshold < value; } diff --git a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp index 73084f487b9..56f68fd265a 100644 --- a/src/Storages/MergeTree/MutateFromLogEntryTask.cpp +++ b/src/Storages/MergeTree/MutateFromLogEntryTask.cpp @@ -254,6 +254,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit LOG_ERROR(log, "{}. Data after mutation is not byte-identical to data on another replicas. 
" "We will download merged part from replica to force byte-identical result.", getCurrentExceptionMessage(false)); + mutate_task->updateProfileEvents(); write_part_log(ExecutionStatus::fromCurrentException("", true)); if (storage.getSettings()->detach_not_byte_identical_parts) @@ -281,6 +282,7 @@ bool MutateFromLogEntryTask::finalize(ReplicatedMergeMutateTaskBase::PartLogWrit */ finish_callback = [storage_ptr = &storage]() { storage_ptr->merge_selecting_task->schedule(); }; ProfileEvents::increment(ProfileEvents::ReplicatedPartMutations); + mutate_task->updateProfileEvents(); write_part_log({}); return true; diff --git a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp index 9aec074deae..10461eb5942 100644 --- a/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp +++ b/src/Storages/MergeTree/MutatePlainMergeTreeTask.cpp @@ -102,6 +102,7 @@ bool MutatePlainMergeTreeTask::executeStep() transaction.commit(); storage.updateMutationEntriesErrors(future_part, true, ""); + mutate_task->updateProfileEvents(); write_part_log({}); state = State::NEED_FINISH; @@ -114,6 +115,7 @@ bool MutatePlainMergeTreeTask::executeStep() PreformattedMessage exception_message = getCurrentExceptionMessageAndPattern(/* with_stacktrace */ false); LOG_ERROR(getLogger("MutatePlainMergeTreeTask"), exception_message); storage.updateMutationEntriesErrors(future_part, false, exception_message.text); + mutate_task->updateProfileEvents(); write_part_log(ExecutionStatus::fromCurrentException("", true)); tryLogCurrentException(__PRETTY_FUNCTION__); return false; diff --git a/src/Storages/MergeTree/MutateTask.cpp b/src/Storages/MergeTree/MutateTask.cpp index 5fcf699de59..14c274d7f64 100644 --- a/src/Storages/MergeTree/MutateTask.cpp +++ b/src/Storages/MergeTree/MutateTask.cpp @@ -38,7 +38,13 @@ namespace ProfileEvents { -extern const Event MutateTaskProjectionsCalculationMicroseconds; + extern const Event MutationTotalParts; + extern const Event MutationUntouchedParts; + extern const Event MutationTotalMilliseconds; + extern const Event MutationExecuteMilliseconds; + extern const Event MutationAllPartColumns; + extern const Event MutationSomePartColumns; + extern const Event MutateTaskProjectionsCalculationMicroseconds; } namespace CurrentMetrics @@ -659,7 +665,7 @@ static NameSet collectFilesToSkip( const Block & updated_header, const std::set & indices_to_recalc, const String & mrk_extension, - const std::set & projections_to_recalc, + const std::set & projections_to_skip, const std::set & stats_to_recalc) { NameSet files_to_skip = source_part->getFileNamesWithoutChecksums(); @@ -684,7 +690,7 @@ static NameSet collectFilesToSkip( } } - for (const auto & projection : projections_to_recalc) + for (const auto & projection : projections_to_skip) files_to_skip.insert(projection->getDirectoryName()); for (const auto & stat : stats_to_recalc) @@ -1046,6 +1052,7 @@ struct MutationContext /// Whether we need to count lightweight delete rows in this mutation bool count_lightweight_deleted_rows; + UInt64 execute_elapsed_ns = 0; }; using MutationContextPtr = std::shared_ptr; @@ -1554,6 +1561,10 @@ private: removed_projections.insert(command.column_name); } + bool lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + bool lightweight_delete_drop = lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP; + const auto & projections = ctx->metadata_snapshot->getProjections(); for (const 
auto & projection : projections) { @@ -1561,10 +1572,11 @@ private: continue; bool need_recalculate = - ctx->materialized_projections.contains(projection.name) + (ctx->materialized_projections.contains(projection.name) || (!is_full_part_storage && ctx->source_part->hasProjection(projection.name) - && !ctx->source_part->hasBrokenProjection(projection.name)); + && !ctx->source_part->hasBrokenProjection(projection.name))) + && !lightweight_delete_drop; if (need_recalculate) { @@ -1572,7 +1584,7 @@ private: } else { - if (ctx->source_part->checksums.has(projection.getDirectoryName())) + if (!lightweight_delete_mode && ctx->source_part->checksums.has(projection.getDirectoryName())) entries_to_hardlink.insert(projection.getDirectoryName()); } } @@ -2017,6 +2029,9 @@ MutateTask::MutateTask( bool MutateTask::execute() { + Stopwatch watch; + SCOPE_EXIT({ ctx->execute_elapsed_ns += watch.elapsedNanoseconds(); }); + switch (state) { case State::NEED_PREPARE: @@ -2050,6 +2065,15 @@ bool MutateTask::execute() return false; } +void MutateTask::updateProfileEvents() const +{ + UInt64 total_elapsed_ms = (*ctx->mutate_entry)->watch.elapsedMilliseconds(); + UInt64 execute_elapsed_ms = ctx->execute_elapsed_ns / 1000000UL; + + ProfileEvents::increment(ProfileEvents::MutationTotalMilliseconds, total_elapsed_ms); + ProfileEvents::increment(ProfileEvents::MutationExecuteMilliseconds, execute_elapsed_ms); +} + static bool canSkipConversionToNullable(const MergeTreeDataPartPtr & part, const MutationCommand & command) { if (command.type != MutationCommand::READ_COLUMN) @@ -2112,6 +2136,7 @@ static bool canSkipMutationCommandForPart(const MergeTreeDataPartPtr & part, con bool MutateTask::prepare() { + ProfileEvents::increment(ProfileEvents::MutationTotalParts); MutationHelpers::checkOperationIsNotCanceled(*ctx->merges_blocker, ctx->mutate_entry); if (ctx->future_part->parts.size() != 1) @@ -2174,6 +2199,7 @@ bool MutateTask::prepare() ctx->temporary_directory_lock = std::move(lock); } + ProfileEvents::increment(ProfileEvents::MutationUntouchedParts); promise.set_value(std::move(part)); return false; } @@ -2198,6 +2224,8 @@ bool MutateTask::prepare() ctx->stage_progress = std::make_unique(1.0); + bool lightweight_delete_mode = false; + if (!ctx->for_interpreter.empty()) { /// Always disable filtering in mutations: we want to read and write all rows because for updates we rewrite only some of the @@ -2215,6 +2243,21 @@ bool MutateTask::prepare() ctx->mutating_pipeline_builder = ctx->interpreter->execute(); ctx->updated_header = ctx->interpreter->getUpdatedHeader(); ctx->progress_callback = MergeProgressCallback((*ctx->mutate_entry)->ptr(), ctx->watch_prev_elapsed, *ctx->stage_progress); + + lightweight_delete_mode = ctx->updated_header.has(RowExistsColumn::name); + /// If under the condition of lightweight delete mode with rebuild option, add projections again here as we can only know + /// the condition as early as from here. 
+ if (lightweight_delete_mode + && ctx->data->getSettings()->lightweight_mutation_projection_mode == LightweightMutationProjectionMode::REBUILD) + { + for (const auto & projection : ctx->metadata_snapshot->getProjections()) + { + if (!ctx->source_part->hasProjection(projection.name)) + continue; + + ctx->materialized_projections.insert(projection.name); + } + } } auto single_disk_volume = std::make_shared("volume_" + ctx->future_part->name, ctx->space_reservation->getDisk(), 0); @@ -2256,7 +2299,7 @@ bool MutateTask::prepare() if (ctx->mutating_pipeline_builder.initialized()) ctx->execute_ttl_type = MutationHelpers::shouldExecuteTTL(ctx->metadata_snapshot, ctx->interpreter->getColumnDependencies()); - if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && ctx->updated_header.has(RowExistsColumn::name)) + if (ctx->data->getSettings()->exclude_deleted_rows_for_part_size_in_merge && lightweight_delete_mode) { /// This mutation contains lightweight delete and we need to count the deleted rows, /// Reset existing_rows_count of new data part to 0 and it will be updated while writing _row_exists column @@ -2283,6 +2326,7 @@ bool MutateTask::prepare() ctx->new_data_part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::REMOVE_BLOBS; task = std::make_unique(ctx); + ProfileEvents::increment(ProfileEvents::MutationAllPartColumns); } else /// TODO: check that we modify only non-key columns in this case. { @@ -2293,10 +2337,30 @@ bool MutateTask::prepare() ctx->context, ctx->materialized_indices); - ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( - ctx->source_part, - ctx->metadata_snapshot, - ctx->materialized_projections); + auto lightweight_mutation_projection_mode = ctx->data->getSettings()->lightweight_mutation_projection_mode; + bool lightweight_delete_drops_projections = + lightweight_mutation_projection_mode == LightweightMutationProjectionMode::DROP + || lightweight_mutation_projection_mode == LightweightMutationProjectionMode::THROW; + + std::set projections_to_skip_container; + auto * projections_to_skip = &projections_to_skip_container; + + bool should_create_projections = !(lightweight_delete_mode && lightweight_delete_drops_projections); + /// Under lightweight delete mode, if option is drop, projections_to_recalc should be empty. 
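The three lightweight_mutation_projection_mode values translate roughly into the following decision about a part's projections when a lightweight delete touches it. This is a hedged, standalone sketch of the behavior, not the actual MutateTask code.

#include <set>
#include <stdexcept>
#include <string>

enum class ProjectionModeSketch { Throw, Drop, Rebuild };

/// Returns the set of projections to recalculate; Drop leaves it empty so projection files
/// are neither rebuilt nor hardlinked into the new part.
std::set<std::string> handleProjectionsOnLightweightDelete(
    ProjectionModeSketch mode, const std::set<std::string> & existing_projections)
{
    switch (mode)
    {
        case ProjectionModeSketch::Throw:
            /// Reject the lightweight delete; in the real flow this check happens earlier, when the delete is validated.
            if (!existing_projections.empty())
                throw std::runtime_error("Lightweight delete is not allowed on a table with projections");
            return {};
        case ProjectionModeSketch::Drop:
            return {};
        case ProjectionModeSketch::Rebuild:
            /// Mark every projection that exists in the source part for recalculation.
            return existing_projections;
    }
    return {};
}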
+ if (should_create_projections) + { + ctx->projections_to_recalc = MutationHelpers::getProjectionsToRecalculate( + ctx->source_part, + ctx->metadata_snapshot, + ctx->materialized_projections); + + projections_to_skip = &ctx->projections_to_recalc; + } + else + { + for (const auto & projection : ctx->metadata_snapshot->getProjections()) + projections_to_skip->insert(&projection); + } ctx->stats_to_recalc = MutationHelpers::getStatisticsToRecalculate(ctx->metadata_snapshot, ctx->materialized_statistics); @@ -2306,7 +2370,7 @@ bool MutateTask::prepare() ctx->updated_header, ctx->indices_to_recalc, ctx->mrk_extension, - ctx->projections_to_recalc, + *projections_to_skip, ctx->stats_to_recalc); ctx->files_to_rename = MutationHelpers::collectFilesForRenames( @@ -2322,6 +2386,7 @@ bool MutateTask::prepare() ctx->new_data_part->remove_tmp_policy = IMergeTreeDataPart::BlobsRemovalPolicyForTemporaryParts::ASK_KEEPER; task = std::make_unique(ctx); + ProfileEvents::increment(ProfileEvents::MutationSomePartColumns); } return true; diff --git a/src/Storages/MergeTree/MutateTask.h b/src/Storages/MergeTree/MutateTask.h index dc22b90f0e9..08427bff6d8 100644 --- a/src/Storages/MergeTree/MutateTask.h +++ b/src/Storages/MergeTree/MutateTask.h @@ -39,6 +39,7 @@ public: bool need_prefix_); bool execute(); + void updateProfileEvents() const; std::future getFuture() { diff --git a/src/Storages/MergeTree/VectorSimilarityCondition.cpp b/src/Storages/MergeTree/VectorSimilarityCondition.cpp new file mode 100644 index 00000000000..2e53b4ecb3a --- /dev/null +++ b/src/Storages/MergeTree/VectorSimilarityCondition.cpp @@ -0,0 +1,350 @@ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace DB +{ + +namespace ErrorCodes +{ + extern const int LOGICAL_ERROR; + extern const int INCORRECT_QUERY; +} + +namespace +{ + +template +void extractReferenceVectorFromLiteral(std::vector & reference_vector, Literal literal) +{ + Float64 float_element_of_reference_vector; + Int64 int_element_of_reference_vector; + + for (const auto & value : literal.value()) + { + if (value.tryGet(float_element_of_reference_vector)) + reference_vector.emplace_back(float_element_of_reference_vector); + else if (value.tryGet(int_element_of_reference_vector)) + reference_vector.emplace_back(static_cast(int_element_of_reference_vector)); + else + throw Exception(ErrorCodes::INCORRECT_QUERY, "Wrong type of elements in reference vector. 
Only float or int are supported."); + } +} + +VectorSimilarityCondition::Info::DistanceFunction stringToDistanceFunction(std::string_view distance_function) +{ + if (distance_function == "L2Distance") + return VectorSimilarityCondition::Info::DistanceFunction::L2; + else + return VectorSimilarityCondition::Info::DistanceFunction::Unknown; +} + +} + +VectorSimilarityCondition::VectorSimilarityCondition(const SelectQueryInfo & query_info, ContextPtr context) + : block_with_constants(KeyCondition::getBlockWithConstants(query_info.query, query_info.syntax_analyzer_result, context)) + , index_granularity(context->getMergeTreeSettings().index_granularity) + , max_limit_for_ann_queries(context->getSettingsRef().max_limit_for_ann_queries) + , index_is_useful(checkQueryStructure(query_info)) +{} + +bool VectorSimilarityCondition::alwaysUnknownOrTrue(String distance_function) const +{ + if (!index_is_useful) + return true; /// query isn't supported + /// If query is supported, check if distance function of index is the same as distance function in query + return !(stringToDistanceFunction(distance_function) == query_information->distance_function); +} + +UInt64 VectorSimilarityCondition::getLimit() const +{ + if (index_is_useful && query_information.has_value()) + return query_information->limit; + throw Exception(ErrorCodes::LOGICAL_ERROR, "No LIMIT section in query, not supported"); +} + +std::vector VectorSimilarityCondition::getReferenceVector() const +{ + if (index_is_useful && query_information.has_value()) + return query_information->reference_vector; + throw Exception(ErrorCodes::LOGICAL_ERROR, "Reference vector was requested for useless or uninitialized index."); +} + +size_t VectorSimilarityCondition::getDimensions() const +{ + if (index_is_useful && query_information.has_value()) + return query_information->reference_vector.size(); + throw Exception(ErrorCodes::LOGICAL_ERROR, "Number of dimensions was requested for useless or uninitialized index."); +} + +String VectorSimilarityCondition::getColumnName() const +{ + if (index_is_useful && query_information.has_value()) + return query_information->column_name; + throw Exception(ErrorCodes::LOGICAL_ERROR, "Column name was requested for useless or uninitialized index."); +} + +VectorSimilarityCondition::Info::DistanceFunction VectorSimilarityCondition::getDistanceFunction() const +{ + if (index_is_useful && query_information.has_value()) + return query_information->distance_function; + throw Exception(ErrorCodes::LOGICAL_ERROR, "Distance function was requested for useless or uninitialized index."); +} + +bool VectorSimilarityCondition::checkQueryStructure(const SelectQueryInfo & query) +{ + Info order_by_info; + + /// Build rpns for query sections + const auto & select = query.query->as(); + + RPN rpn_order_by; + RPNElement rpn_limit; + UInt64 limit; + + if (select.limitLength()) + traverseAtomAST(select.limitLength(), rpn_limit); + + if (select.orderBy()) + traverseOrderByAST(select.orderBy(), rpn_order_by); + + /// Reverse RPNs for conveniences during parsing + std::reverse(rpn_order_by.begin(), rpn_order_by.end()); + + const bool order_by_is_valid = matchRPNOrderBy(rpn_order_by, order_by_info); + const bool limit_is_valid = matchRPNLimit(rpn_limit, limit); + + if (!limit_is_valid || limit > max_limit_for_ann_queries) + return false; + + if (order_by_is_valid) + { + query_information = std::move(order_by_info); + query_information->limit = limit; + return true; + } + + return false; +} + +void VectorSimilarityCondition::traverseAST(const 
ASTPtr & node, RPN & rpn) +{ + /// If the node is ASTFunction, it may have children nodes + if (const auto * func = node->as()) + { + const ASTs & children = func->arguments->children; + /// Traverse children nodes + for (const auto& child : children) + traverseAST(child, rpn); + } + + RPNElement element; + /// Get the data behind node + if (!traverseAtomAST(node, element)) + element.function = RPNElement::FUNCTION_UNKNOWN; + + rpn.emplace_back(std::move(element)); +} + +bool VectorSimilarityCondition::traverseAtomAST(const ASTPtr & node, RPNElement & out) +{ + /// Match Functions + if (const auto * function = node->as()) + { + /// Set the name + out.func_name = function->name; + + if (function->name == "L1Distance" || + function->name == "L2Distance" || + function->name == "LinfDistance" || + function->name == "cosineDistance" || + function->name == "dotProduct") + out.function = RPNElement::FUNCTION_DISTANCE; + else if (function->name == "array") + out.function = RPNElement::FUNCTION_ARRAY; + else if (function->name == "_CAST") + out.function = RPNElement::FUNCTION_CAST; + else + return false; + + return true; + } + /// Match identifier + else if (const auto * identifier = node->as()) + { + out.function = RPNElement::FUNCTION_IDENTIFIER; + out.identifier.emplace(identifier->name()); + out.func_name = "column identifier"; + + return true; + } + + /// Check if we have constants behind the node + return tryCastToConstType(node, out); +} + +bool VectorSimilarityCondition::tryCastToConstType(const ASTPtr & node, RPNElement & out) +{ + Field const_value; + DataTypePtr const_type; + + if (KeyCondition::getConstant(node, block_with_constants, const_value, const_type)) + { + /// Check for constant types + if (const_value.getType() == Field::Types::Float64) + { + out.function = RPNElement::FUNCTION_FLOAT_LITERAL; + out.float_literal.emplace(const_value.safeGet()); + out.func_name = "Float literal"; + return true; + } + + if (const_value.getType() == Field::Types::UInt64) + { + out.function = RPNElement::FUNCTION_INT_LITERAL; + out.int_literal.emplace(const_value.safeGet()); + out.func_name = "Int literal"; + return true; + } + + if (const_value.getType() == Field::Types::Int64) + { + out.function = RPNElement::FUNCTION_INT_LITERAL; + out.int_literal.emplace(const_value.safeGet()); + out.func_name = "Int literal"; + return true; + } + + if (const_value.getType() == Field::Types::Array) + { + out.function = RPNElement::FUNCTION_LITERAL_ARRAY; + out.array_literal = const_value.safeGet(); + out.func_name = "Array literal"; + return true; + } + + if (const_value.getType() == Field::Types::String) + { + out.function = RPNElement::FUNCTION_STRING_LITERAL; + out.func_name = const_value.safeGet(); + return true; + } + } + + return false; +} + +void VectorSimilarityCondition::traverseOrderByAST(const ASTPtr & node, RPN & rpn) +{ + if (const auto * expr_list = node->as()) + if (const auto * order_by_element = expr_list->children.front()->as()) + traverseAST(order_by_element->children.front(), rpn); +} + +/// Returns true and stores ANNExpr if the query has valid ORDERBY clause +bool VectorSimilarityCondition::matchRPNOrderBy(RPN & rpn, Info & info) +{ + /// ORDER BY clause must have at least 3 expressions + if (rpn.size() < 3) + return false; + + auto iter = rpn.begin(); + auto end = rpn.end(); + + bool identifier_found = false; + + /// Matches DistanceFunc->[Column]->[ArrayFunc]->ReferenceVector(floats)->[Column] + if (iter->function != RPNElement::FUNCTION_DISTANCE) + return false; + + 
info.distance_function = stringToDistanceFunction(iter->func_name); + ++iter; + + if (iter->function == RPNElement::FUNCTION_IDENTIFIER) + { + identifier_found = true; + info.column_name = std::move(iter->identifier.value()); + ++iter; + } + + if (iter->function == RPNElement::FUNCTION_ARRAY) + ++iter; + + if (iter->function == RPNElement::FUNCTION_LITERAL_ARRAY) + { + extractReferenceVectorFromLiteral(info.reference_vector, iter->array_literal); + ++iter; + } + + /// further conditions are possible if there is no array, or no identifier is found + /// the array can be inside a cast function. For other cases, see the loop after this condition + if (iter != end && iter->function == RPNElement::FUNCTION_CAST) + { + ++iter; + /// Cast should be made to array + if (!iter->func_name.starts_with("Array")) + return false; + ++iter; + if (iter->function == RPNElement::FUNCTION_LITERAL_ARRAY) + { + extractReferenceVectorFromLiteral(info.reference_vector, iter->array_literal); + ++iter; + } + else + return false; + } + + while (iter != end) + { + if (iter->function == RPNElement::FUNCTION_FLOAT_LITERAL || + iter->function == RPNElement::FUNCTION_INT_LITERAL) + info.reference_vector.emplace_back(getFloatOrIntLiteralOrPanic(iter)); + else if (iter->function == RPNElement::FUNCTION_IDENTIFIER) + { + if (identifier_found) + return false; + info.column_name = std::move(iter->identifier.value()); + identifier_found = true; + } + else + return false; + + ++iter; + } + + /// Final checks of correctness + return identifier_found && !info.reference_vector.empty(); +} + +/// Returns true and stores Length if we have valid LIMIT clause in query +bool VectorSimilarityCondition::matchRPNLimit(RPNElement & rpn, UInt64 & limit) +{ + if (rpn.function == RPNElement::FUNCTION_INT_LITERAL) + { + limit = rpn.int_literal.value(); + return true; + } + + return false; +} + +/// Gets float or int from AST node +float VectorSimilarityCondition::getFloatOrIntLiteralOrPanic(const RPN::iterator& iter) +{ + if (iter->float_literal.has_value()) + return iter->float_literal.value(); + if (iter->int_literal.has_value()) + return static_cast(iter->int_literal.value()); + throw Exception(ErrorCodes::INCORRECT_QUERY, "Wrong parsed AST in buildRPN\n"); +} + +} diff --git a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h b/src/Storages/MergeTree/VectorSimilarityCondition.h similarity index 54% rename from src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h rename to src/Storages/MergeTree/VectorSimilarityCondition.h index 5da2a714b02..fd339ed715d 100644 --- a/src/Storages/MergeTree/ApproximateNearestNeighborIndexesCommon.h +++ b/src/Storages/MergeTree/VectorSimilarityCondition.h @@ -9,52 +9,9 @@ namespace DB { -static constexpr auto DISTANCE_FUNCTION_L2 = "L2Distance"; -static constexpr auto DISTANCE_FUNCTION_COSINE = "cosineDistance"; - -/// Approximate Nearest Neighbour queries have a similar structure: -/// - reference vector from which all distances are calculated -/// - metric name (e.g L2Distance, LpDistance, etc.) -/// - name of column with embeddings -/// - type of query -/// - maximum number of returned elements (LIMIT) -/// -/// And two optional parameters: -/// - p for LpDistance function -/// - distance to compare with (only for where queries) -/// -/// This struct holds all these components. 
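The reference-vector extraction above boils down to: accept a literal array whose elements are floats or integers, convert everything to Float64, and reject anything else. A standalone sketch with std::variant in place of ClickHouse's Field type:

#include <cstdint>
#include <stdexcept>
#include <string>
#include <variant>
#include <vector>

using LiteralElement = std::variant<double, int64_t, std::string>;

/// Convert a literal array of floats/ints into the Float64 reference vector used by the index.
std::vector<double> extractReferenceVector(const std::vector<LiteralElement> & literal)
{
    std::vector<double> reference_vector;
    reference_vector.reserve(literal.size());
    for (const auto & value : literal)
    {
        if (const auto * as_float = std::get_if<double>(&value))
            reference_vector.push_back(*as_float);
        else if (const auto * as_int = std::get_if<int64_t>(&value))
            reference_vector.push_back(static_cast<double>(*as_int));
        else
            throw std::invalid_argument("Only float or int elements are supported in the reference vector");
    }
    return reference_vector;
}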
-struct ApproximateNearestNeighborInformation -{ - using Embedding = std::vector; - Embedding reference_vector; - - enum class Metric : uint8_t - { - Unknown, - L2, - Lp - }; - Metric metric; - - String column_name; - UInt64 limit; - - enum class Type : uint8_t - { - OrderBy, - Where - }; - Type type; - - float p_for_lp_dist = -1.0; - float distance = -1.0; -}; - - -// Class ANNCondition, is responsible for recognizing if the query is an ANN queries which can utilize ANN indexes. It parses the SQL query -/// and checks if it matches ANNIndexes. Method alwaysUnknownOrTrue returns false if we can speed up the query, and true otherwise. It has -/// only one argument, the name of the metric with which index was built. Two main patterns of queries are supported +/// Class VectorSimilarityCondition is responsible for recognizing if the query can utilize vector similarity indexes. +/// Method alwaysUnknownOrTrue returns false if we can speed up the query, and true otherwise. It has +/// only one argument, the name of the distance function with which index was built. Two main patterns of queries are supported /// /// - 1. WHERE queries: /// SELECT * FROM * WHERE DistanceFunc(column, reference_vector) < floatLiteral LIMIT count @@ -64,14 +21,14 @@ struct ApproximateNearestNeighborInformation /// /// Queries without LIMIT count are not supported /// If the query is both of type 1. and 2., than we can't use the index and alwaysUnknownOrTrue returns true. -/// reference_vector should have float coordinates, e.g. (0.2, 0.1, .., 0.5) +/// reference_vector should have float coordinates, e.g. [0.2, 0.1, .., 0.5] /// -/// If the query matches one of these two types, then this class extracts the main information needed for ANN indexes from the query. +/// If the query matches one of these two types, then this class extracts the main information needed for vector similarity indexes from the +/// query. /// /// From matching query it extracts /// - referenceVector -/// - metricName(DistanceFunction) -/// - dimension size if query uses LpDistance +/// - distance function /// - distance to compare(ONLY for search types, otherwise you get exception) /// - spaceDimension(which is referenceVector's components count) /// - column @@ -79,35 +36,45 @@ struct ApproximateNearestNeighborInformation /// - queryHasOrderByClause and queryHasWhereClause return true if query matches the type /// /// Search query type is also recognized for PREWHERE clause -class ApproximateNearestNeighborCondition +class VectorSimilarityCondition { public: - ApproximateNearestNeighborCondition(const SelectQueryInfo & query_info, ContextPtr context); + VectorSimilarityCondition(const SelectQueryInfo & query_info, ContextPtr context); + + /// Approximate nearest neighbour (ANN) / vector similarity queries have a similar structure: + /// - reference vector from which all distances are calculated + /// - distance function, e.g L2Distance + /// - name of column with embeddings + /// - type of query + /// - maximum number of returned elements (LIMIT) + /// + /// And one optional parameter: + /// - distance to compare with (only for where queries) + /// + /// This struct holds all these components. + struct Info + { + enum class DistanceFunction : uint8_t + { + Unknown, + L2 + }; + + std::vector reference_vector; + DistanceFunction distance_function; + String column_name; + UInt64 limit; + float distance = -1.0; + }; /// Returns false if query can be speeded up by an ANN index, true otherwise. 
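A rough sketch of how a caller is expected to use this predicate: build the condition from the query, then consult the index only when alwaysUnknownOrTrue() for the index's own distance function returns false. The names and surrounding flow below are simplified assumptions, not the real calling code.

#include <cstddef>
#include <cstdint>
#include <numeric>
#include <string>
#include <vector>

/// Hypothetical, simplified condition mirroring the interface described above.
struct VectorConditionSketch
{
    std::string query_distance_function = "L2Distance";
    std::vector<double> reference_vector = {0.1, 0.2};
    uint64_t limit = 10;

    /// The index is only useful when the query uses the same distance function the index was built with.
    bool alwaysUnknownOrTrue(const std::string & index_distance_function) const
    {
        return index_distance_function != query_distance_function;
    }
};

/// Either prune with the vector index or fall back to scanning every range.
std::vector<size_t> chooseRanges(const VectorConditionSketch & condition,
                                 const std::string & index_distance_function,
                                 size_t total_ranges)
{
    if (condition.alwaysUnknownOrTrue(index_distance_function))
    {
        /// Query shape or distance function does not match the index: read all ranges.
        std::vector<size_t> all(total_ranges);
        std::iota(all.begin(), all.end(), size_t{0});
        return all;
    }
    /// Otherwise an approximate search using the reference vector and LIMIT would select the useful ranges.
    return {};
}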
- bool alwaysUnknownOrTrue(String metric) const; + bool alwaysUnknownOrTrue(String distance_function) const; - /// Returns the distance to compare with for search query - float getComparisonDistanceForWhereQuery() const; - - /// Distance should be calculated regarding to referenceVector std::vector getReferenceVector() const; - - /// Reference vector's dimension count size_t getDimensions() const; - String getColumnName() const; - - ApproximateNearestNeighborInformation::Metric getMetricType() const; - - /// The P- value if the metric is 'LpDistance' - float getPValueForLpDistance() const; - - ApproximateNearestNeighborInformation::Type getQueryType() const; - + Info::DistanceFunction getDistanceFunction() const; UInt64 getIndexGranularity() const { return index_granularity; } - - /// Length's value from LIMIT clause UInt64 getLimit() const; private: @@ -118,9 +85,6 @@ private: /// DistanceFunctions FUNCTION_DISTANCE, - //tuple(0.1, ..., 0.1) - FUNCTION_TUPLE, - //array(0.1, ..., 0.1) FUNCTION_ARRAY, @@ -139,9 +103,6 @@ private: /// Unknown, can be any value FUNCTION_UNKNOWN, - /// (0.1, ...., 0.1) vector without word 'tuple' - FUNCTION_LITERAL_TUPLE, - /// [0.1, ...., 0.1] vector without word 'array' FUNCTION_LITERAL_ARRAY, @@ -154,19 +115,14 @@ private: explicit RPNElement(Function function_ = FUNCTION_UNKNOWN) : function(function_) - , func_name("Unknown") - , float_literal(std::nullopt) - , identifier(std::nullopt) {} Function function; - String func_name; + String func_name = "Unknown"; std::optional float_literal; std::optional identifier; std::optional int_literal; - - std::optional tuple_literal; std::optional array_literal; UInt32 dim = 0; @@ -186,16 +142,16 @@ private: void traverseOrderByAST(const ASTPtr & node, RPN & rpn); /// Returns true and stores ANNExpr if the query has valid WHERE section - static bool matchRPNWhere(RPN & rpn, ApproximateNearestNeighborInformation & ann_info); + static bool matchRPNWhere(RPN & rpn, Info & info); /// Returns true and stores ANNExpr if the query has valid ORDERBY section - static bool matchRPNOrderBy(RPN & rpn, ApproximateNearestNeighborInformation & ann_info); + static bool matchRPNOrderBy(RPN & rpn, Info & info); /// Returns true and stores Length if we have valid LIMIT clause in query static bool matchRPNLimit(RPNElement & rpn, UInt64 & limit); - /* Matches dist function, reference vector, column name */ - static bool matchMainParts(RPN::iterator & iter, const RPN::iterator & end, ApproximateNearestNeighborInformation & ann_info); + /// Matches dist function, reference vector, column name + static bool matchMainParts(RPN::iterator & iter, const RPN::iterator & end, Info & info); /// Gets float or int from AST node static float getFloatOrIntLiteralOrPanic(const RPN::iterator& iter); @@ -203,7 +159,7 @@ private: Block block_with_constants; /// true if we have one of two supported query types - std::optional query_information; + std::optional query_information; // Get from settings ANNIndex parameters const UInt64 index_granularity; @@ -214,13 +170,4 @@ private: bool index_is_useful = false; }; - -/// Common interface of ANN indexes. -class IMergeTreeIndexConditionApproximateNearestNeighbor : public IMergeTreeIndexCondition -{ -public: - /// Returns vector of indexes of ranges in granule which are useful for query. 
- virtual std::vector getUsefulRanges(MergeTreeIndexGranulePtr idx_granule) const = 0; -}; - } diff --git a/src/Storages/MergeTree/registerStorageMergeTree.cpp b/src/Storages/MergeTree/registerStorageMergeTree.cpp index b7887c35590..44548e33d46 100644 --- a/src/Storages/MergeTree/registerStorageMergeTree.cpp +++ b/src/Storages/MergeTree/registerStorageMergeTree.cpp @@ -592,7 +592,7 @@ static StoragePtr create(const StorageFactory::Arguments & args) if (ast->value.getType() != Field::Types::String) throw Exception(ErrorCodes::BAD_ARGUMENTS, format_str, error_msg); - graphite_config_name = ast->value.get(); + graphite_config_name = ast->value.safeGet(); } else throw Exception(ErrorCodes::BAD_ARGUMENTS, format_str, error_msg); diff --git a/src/Storages/MessageQueueSink.cpp b/src/Storages/MessageQueueSink.cpp index 9136300ab3b..8190a375b97 100644 --- a/src/Storages/MessageQueueSink.cpp +++ b/src/Storages/MessageQueueSink.cpp @@ -47,6 +47,8 @@ void MessageQueueSink::consume(Chunk & chunk) if (columns.empty()) return; + /// The formatter might hold pointers to buffer (e.g. if PeekableWriteBuffer is used), which means the formatter + /// needs to be reset after buffer might reallocate its memory. In this exact case after restarting the buffer. if (row_format) { size_t row = 0; @@ -61,12 +63,12 @@ void MessageQueueSink::consume(Chunk & chunk) row_format->writeRow(columns, row); } row_format->finalize(); - row_format->resetFormatter(); producer->produce(buffer->str(), i, columns, row - 1); /// Reallocate buffer if it's capacity is large then DBMS_DEFAULT_BUFFER_SIZE, /// because most likely in this case we serialized abnormally large row /// and won't need this large allocated buffer anymore. buffer->restart(DBMS_DEFAULT_BUFFER_SIZE); + row_format->resetFormatter(); } } else @@ -74,12 +76,11 @@ void MessageQueueSink::consume(Chunk & chunk) format->write(getHeader().cloneWithColumns(chunk.detachColumns())); format->finalize(); producer->produce(buffer->str(), chunk.getNumRows(), columns, chunk.getNumRows() - 1); - format->resetFormatter(); buffer->restart(); + format->resetFormatter(); } } - void MessageQueueSink::onCancel() noexcept { try diff --git a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h index c8603fccb86..087207d3860 100644 --- a/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h +++ b/src/Storages/ObjectStorage/DataLakes/IStorageDataLake.h @@ -91,8 +91,9 @@ public: { ConfigurationPtr configuration = base_configuration->clone(); configuration->setPaths(metadata->getDataFiles()); + std::string sample_path; return Storage::resolveSchemaFromData( - object_storage_, configuration, format_settings_, local_context); + object_storage_, configuration, format_settings_, sample_path, local_context); } } diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.cpp b/src/Storages/ObjectStorage/ReadBufferIterator.cpp index df78f128c80..fadf683fce7 100644 --- a/src/Storages/ObjectStorage/ReadBufferIterator.cpp +++ b/src/Storages/ObjectStorage/ReadBufferIterator.cpp @@ -132,7 +132,7 @@ void ReadBufferIterator::setFormatName(const String & format_name) format = format_name; } -String ReadBufferIterator::getLastFileName() const +String ReadBufferIterator::getLastFilePath() const { if (current_object_info) return current_object_info->getPath(); diff --git a/src/Storages/ObjectStorage/ReadBufferIterator.h b/src/Storages/ObjectStorage/ReadBufferIterator.h index 6eeb52ec2ed..b81aebb7b07 100644 --- 
a/src/Storages/ObjectStorage/ReadBufferIterator.h +++ b/src/Storages/ObjectStorage/ReadBufferIterator.h @@ -33,7 +33,7 @@ public: void setResultingSchema(const ColumnsDescription & columns) override; - String getLastFileName() const override; + String getLastFilePath() const override; void setFormatName(const String & format_name) override; diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.cpp b/src/Storages/ObjectStorage/StorageObjectStorage.cpp index fe675a17d54..d9c82d68791 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorage.cpp @@ -1,4 +1,5 @@ #include +#include #include #include @@ -33,6 +34,33 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +String StorageObjectStorage::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) +{ + auto query_settings = configuration->getQuerySettings(context); + /// We don't want to throw an exception if there are no files with specified path. + query_settings.throw_on_zero_files_match = false; + + bool local_distributed_processing = distributed_processing; + if (context->getSettingsRef().use_hive_partitioning) + local_distributed_processing = false; + + auto file_iterator = StorageObjectStorageSource::createFileIterator( + configuration, + query_settings, + object_storage, + local_distributed_processing, + context, + {}, // predicate + metadata.getColumns().getAll(), // virtual_columns + nullptr, // read_keys + {} // file_progress_callback + ); + + if (auto file = file_iterator->next(0)) + return file->getPath(); + return ""; +} + StorageObjectStorage::StorageObjectStorage( ConfigurationPtr configuration_, ObjectStoragePtr object_storage_, @@ -53,7 +81,9 @@ StorageObjectStorage::StorageObjectStorage( , log(getLogger(fmt::format("Storage{}({})", configuration->getEngineName(), table_id_.getFullTableName()))) { ColumnsDescription columns{columns_}; - resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, context); + + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context); configuration->check(context); StorageInMemoryMetadata metadata; @@ -61,7 +91,10 @@ StorageObjectStorage::StorageObjectStorage( metadata.setConstraints(constraints_); metadata.setComment(comment); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns())); + if (sample_path.empty() && context->getSettingsRef().use_hive_partitioning) + sample_path = getPathSample(metadata, context); + + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context, sample_path, format_settings)); setInMemoryMetadata(metadata); } @@ -198,7 +231,7 @@ private: return; auto context = getContext(); iterator_wrapper = StorageObjectStorageSource::createFileIterator( - configuration, object_storage, distributed_processing, + configuration, configuration->getQuerySettings(context), object_storage, distributed_processing, context, predicate, virtual_columns, nullptr, context->getFileProgressCallback()); } }; @@ -350,6 +383,7 @@ std::unique_ptr StorageObjectStorage::createReadBufferIterat { auto file_iterator = StorageObjectStorageSource::createFileIterator( configuration, + configuration->getQuerySettings(context), object_storage, false/* distributed_processing */, context, @@ -366,33 +400,41 @@ ColumnsDescription StorageObjectStorage::resolveSchemaFromData( const ObjectStoragePtr & object_storage, const 
ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context) { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - return readSchemaFromFormat(configuration->format, format_settings, *iterator, context); + auto schema = readSchemaFromFormat(configuration->format, format_settings, *iterator, context); + sample_path = iterator->getLastFilePath(); + return schema; } std::string StorageObjectStorage::resolveFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context) { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); - return detectFormatAndReadSchema(format_settings, *iterator, context).second; + auto format_and_schema = detectFormatAndReadSchema(format_settings, *iterator, context).second; + sample_path = iterator->getLastFilePath(); + return format_and_schema; } std::pair StorageObjectStorage::resolveSchemaAndFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context) { ObjectInfos read_keys; auto iterator = createReadBufferIterator(object_storage, configuration, format_settings, read_keys, context); auto [columns, format] = detectFormatAndReadSchema(format_settings, *iterator, context); + sample_path = iterator->getLastFilePath(); configuration->format = format; return std::pair(columns, format); } diff --git a/src/Storages/ObjectStorage/StorageObjectStorage.h b/src/Storages/ObjectStorage/StorageObjectStorage.h index 818ce055c77..cae0db48f31 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorage.h +++ b/src/Storages/ObjectStorage/StorageObjectStorage.h @@ -102,23 +102,28 @@ public: const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context); static std::string resolveFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context); static std::pair resolveSchemaAndFormatFromData( const ObjectStoragePtr & object_storage, const ConfigurationPtr & configuration, const std::optional & format_settings, + std::string & sample_path, const ContextPtr & context); protected: virtual void updateConfiguration(ContextPtr local_context); + String getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); + virtual ReadFromFormatInfo prepareReadingFromFormat( const Strings & requested_columns, const StorageSnapshotPtr & storage_snapshot, diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp index 78f568d8ae2..c214665f7e0 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.cpp @@ -1,6 +1,8 @@ #include "Storages/ObjectStorage/StorageObjectStorageCluster.h" #include +#include +#include #include #include #include @@ -19,6 +21,28 @@ namespace ErrorCodes extern const int LOGICAL_ERROR; } +String StorageObjectStorageCluster::getPathSample(StorageInMemoryMetadata metadata, ContextPtr context) +{ + auto 
query_settings = configuration->getQuerySettings(context); + /// We don't want to throw an exception if there are no files with specified path. + query_settings.throw_on_zero_files_match = false; + auto file_iterator = StorageObjectStorageSource::createFileIterator( + configuration, + query_settings, + object_storage, + false, // distributed_processing + context, + {}, // predicate + metadata.getColumns().getAll(), // virtual_columns + nullptr, // read_keys + {} // file_progress_callback + ); + + if (auto file = file_iterator->next(0)) + return file->getPath(); + return ""; +} + StorageObjectStorageCluster::StorageObjectStorageCluster( const String & cluster_name_, ConfigurationPtr configuration_, @@ -33,14 +57,18 @@ StorageObjectStorageCluster::StorageObjectStorageCluster( , object_storage(object_storage_) { ColumnsDescription columns{columns_}; - resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, {}, context_); + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, {}, sample_path, context_); configuration->check(context_); StorageInMemoryMetadata metadata; metadata.setColumns(columns); metadata.setConstraints(constraints_); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns())); + if (sample_path.empty() && context_->getSettingsRef().use_hive_partitioning) + sample_path = getPathSample(metadata, context_); + + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(metadata.getColumns(), context_, sample_path)); setInMemoryMetadata(metadata); } @@ -83,8 +111,8 @@ RemoteQueryExecutor::Extension StorageObjectStorageCluster::getTaskIteratorExten const ActionsDAG::Node * predicate, const ContextPtr & local_context) const { auto iterator = StorageObjectStorageSource::createFileIterator( - configuration, object_storage, /* distributed_processing */false, local_context, - predicate, virtual_columns, nullptr, local_context->getFileProgressCallback()); + configuration, configuration->getQuerySettings(local_context), object_storage, /* distributed_processing */false, + local_context, predicate, virtual_columns, nullptr, local_context->getFileProgressCallback()); auto callback = std::make_shared>([iterator]() mutable -> String { diff --git a/src/Storages/ObjectStorage/StorageObjectStorageCluster.h b/src/Storages/ObjectStorage/StorageObjectStorageCluster.h index 108aa109616..0088ff28fc2 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageCluster.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageCluster.h @@ -27,6 +27,8 @@ public: RemoteQueryExecutor::Extension getTaskIteratorExtension( const ActionsDAG::Node * predicate, const ContextPtr & context) const override; + String getPathSample(StorageInMemoryMetadata metadata, ContextPtr context); + private: void updateQueryToSendIfNeeded( ASTPtr & query, diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp index 320799c7166..d8e26977e75 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.cpp @@ -99,6 +99,7 @@ std::string StorageObjectStorageSource::getUniqueStoragePathIdentifier( std::shared_ptr StorageObjectStorageSource::createFileIterator( ConfigurationPtr configuration, + const StorageObjectStorage::QuerySettings & query_settings, ObjectStoragePtr object_storage, bool distributed_processing, const ContextPtr & local_context, @@ -116,7 +117,6 @@ 
std::shared_ptr StorageObjectStorageSourc throw Exception(ErrorCodes::BAD_ARGUMENTS, "Expression can not have wildcards inside {} name", configuration->getNamespaceType()); - auto settings = configuration->getQuerySettings(local_context); const bool is_archive = configuration->isArchive(); std::unique_ptr iterator; @@ -125,8 +125,8 @@ std::shared_ptr StorageObjectStorageSourc /// Iterate through disclosed globs and make a source for each file iterator = std::make_unique( object_storage, configuration, predicate, virtual_columns, - local_context, is_archive ? nullptr : read_keys, settings.list_object_keys_size, - settings.throw_on_zero_files_match, file_progress_callback); + local_context, is_archive ? nullptr : read_keys, query_settings.list_object_keys_size, + query_settings.throw_on_zero_files_match, file_progress_callback); } else { @@ -148,7 +148,7 @@ std::shared_ptr StorageObjectStorageSourc iterator = std::make_unique( object_storage, copy_configuration, virtual_columns, is_archive ? nullptr : read_keys, - settings.ignore_non_existent_file, file_progress_callback); + query_settings.ignore_non_existent_file, file_progress_callback); } if (is_archive) @@ -198,15 +198,17 @@ Chunk StorageObjectStorageSource::generate() const auto & object_info = reader.getObjectInfo(); const auto & filename = object_info->getFileName(); chassert(object_info->metadata); + VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, read_from_format_info.requested_virtual_columns, - {.path = getUniqueStoragePathIdentifier(*configuration, *object_info, false), - .size = object_info->isArchive() ? object_info->fileSizeInArchive() : object_info->metadata->size_bytes, - .filename = &filename, - .last_modified = object_info->metadata->last_modified, - .etag = &(object_info->metadata->etag) - }); + { + .path = getUniqueStoragePathIdentifier(*configuration, *object_info, false), + .size = object_info->isArchive() ? 
object_info->fileSizeInArchive() : object_info->metadata->size_bytes, + .filename = &filename, + .last_modified = object_info->metadata->last_modified, + .etag = &(object_info->metadata->etag) + }, getContext(), read_from_format_info.columns_description); const auto & partition_columns = configuration->getPartitionColumns(); if (!partition_columns.empty() && chunk_size && chunk.hasColumns()) diff --git a/src/Storages/ObjectStorage/StorageObjectStorageSource.h b/src/Storages/ObjectStorage/StorageObjectStorageSource.h index e466621e1e1..6681dbf4578 100644 --- a/src/Storages/ObjectStorage/StorageObjectStorageSource.h +++ b/src/Storages/ObjectStorage/StorageObjectStorageSource.h @@ -52,6 +52,7 @@ public: static std::shared_ptr createFileIterator( ConfigurationPtr configuration, + const StorageObjectStorage::QuerySettings & query_settings, ObjectStoragePtr object_storage, bool distributed_processing, const ContextPtr & local_context, diff --git a/src/Storages/ObjectStorage/Utils.cpp b/src/Storages/ObjectStorage/Utils.cpp index e49e14d2a0c..73410d959e0 100644 --- a/src/Storages/ObjectStorage/Utils.cpp +++ b/src/Storages/ObjectStorage/Utils.cpp @@ -49,19 +49,20 @@ void resolveSchemaAndFormat( ObjectStoragePtr object_storage, const StorageObjectStorage::ConfigurationPtr & configuration, std::optional format_settings, + std::string & sample_path, const ContextPtr & context) { if (columns.empty()) { if (format == "auto") std::tie(columns, format) = - StorageObjectStorage::resolveSchemaAndFormatFromData(object_storage, configuration, format_settings, context); + StorageObjectStorage::resolveSchemaAndFormatFromData(object_storage, configuration, format_settings, sample_path, context); else - columns = StorageObjectStorage::resolveSchemaFromData(object_storage, configuration, format_settings, context); + columns = StorageObjectStorage::resolveSchemaFromData(object_storage, configuration, format_settings, sample_path, context); } else if (format == "auto") { - format = StorageObjectStorage::resolveFormatFromData(object_storage, configuration, format_settings, context); + format = StorageObjectStorage::resolveFormatFromData(object_storage, configuration, format_settings, sample_path, context); } if (!columns.hasOnlyOrdinary()) diff --git a/src/Storages/ObjectStorage/Utils.h b/src/Storages/ObjectStorage/Utils.h index 2077999df41..7ee14f50979 100644 --- a/src/Storages/ObjectStorage/Utils.h +++ b/src/Storages/ObjectStorage/Utils.h @@ -19,6 +19,7 @@ void resolveSchemaAndFormat( ObjectStoragePtr object_storage, const StorageObjectStorage::ConfigurationPtr & configuration, std::optional format_settings, + std::string & sample_path, const ContextPtr & context); } diff --git a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp index 4d921003e04..2634a7b2f1e 100644 --- a/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp +++ b/src/Storages/ObjectStorageQueue/ObjectStorageQueueSource.cpp @@ -524,7 +524,7 @@ Chunk ObjectStorageQueueSource::generateImpl() { .path = path, .size = reader.getObjectInfo()->metadata->size_bytes - }); + }, getContext(), read_from_format_info.columns_description); return chunk; } diff --git a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp index f51a7a913b8..5dc3e01962c 100644 --- a/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp +++ b/src/Storages/ObjectStorageQueue/StorageObjectStorageQueue.cpp @@ -161,14 +161,15 @@ 
StorageObjectStorageQueue::StorageObjectStorageQueue( configuration->check(context_); ColumnsDescription columns{columns_}; - resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, context_); + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, object_storage, configuration, format_settings, sample_path, context_); configuration->check(context_); StorageInMemoryMetadata storage_metadata; storage_metadata.setColumns(columns); storage_metadata.setConstraints(constraints_); storage_metadata.setComment(comment); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_)); setInMemoryMetadata(storage_metadata); LOG_INFO(log, "Using zookeeper path: {}", zk_path.string()); diff --git a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp index f632e553a0d..01f78673ed8 100644 --- a/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp +++ b/src/Storages/PostgreSQL/PostgreSQLReplicationHandler.cpp @@ -659,7 +659,7 @@ void PostgreSQLReplicationHandler::dropReplicationSlot(pqxx::nontransaction & tx void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) { - std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", publication_name); + std::string query_str = fmt::format("DROP PUBLICATION IF EXISTS {}", doubleQuoteString(publication_name)); tx.exec(query_str); LOG_DEBUG(log, "Dropped publication: {}", publication_name); } @@ -667,7 +667,7 @@ void PostgreSQLReplicationHandler::dropPublication(pqxx::nontransaction & tx) void PostgreSQLReplicationHandler::addTableToPublication(pqxx::nontransaction & ntx, const String & table_name) { - std::string query_str = fmt::format("ALTER PUBLICATION {} ADD TABLE ONLY {}", publication_name, doubleQuoteWithSchema(table_name)); + std::string query_str = fmt::format("ALTER PUBLICATION {} ADD TABLE ONLY {}", doubleQuoteString(publication_name), doubleQuoteWithSchema(table_name)); ntx.exec(query_str); LOG_TRACE(log, "Added table {} to publication `{}`", doubleQuoteWithSchema(table_name), publication_name); } diff --git a/src/Storages/Statistics/StatisticsCountMinSketch.cpp b/src/Storages/Statistics/StatisticsCountMinSketch.cpp index 0dc01f5fcf0..6dbd0625d3d 100644 --- a/src/Storages/Statistics/StatisticsCountMinSketch.cpp +++ b/src/Storages/Statistics/StatisticsCountMinSketch.cpp @@ -48,7 +48,7 @@ Float64 StatisticsCountMinSketch::estimateEqual(const Field & val) const return sketch.get_estimate(&val_converted, data_type->getSizeOfValueInMemory()); if (isStringOrFixedString(data_type)) - return sketch.get_estimate(val.get()); + return sketch.get_estimate(val.safeGet()); throw Exception(ErrorCodes::LOGICAL_ERROR, "Statistics 'count_min' does not support estimate data type of {}", data_type->getName()); } diff --git a/src/Storages/StorageFactory.cpp b/src/Storages/StorageFactory.cpp index 060b271d8f4..b95ccedb093 100644 --- a/src/Storages/StorageFactory.cpp +++ b/src/Storages/StorageFactory.cpp @@ -203,7 +203,7 @@ StoragePtr StorageFactory::get( } if (query.comment) - comment = query.comment->as().value.get(); + comment = query.comment->as().value.safeGet(); ASTs empty_engine_args; Arguments arguments{ diff --git a/src/Storages/StorageFile.cpp b/src/Storages/StorageFile.cpp index 324c88d643d..766b7722cdf 100644 --- a/src/Storages/StorageFile.cpp +++ 
b/src/Storages/StorageFile.cpp @@ -53,6 +53,7 @@ #include #include #include +#include #include "base/defines.h" #include @@ -516,7 +517,7 @@ namespace StorageFile::getSchemaCache(getContext()).addManyColumns(cache_keys, columns); } - String getLastFileName() const override + String getLastFilePath() const override { if (current_index != 0) return paths[current_index - 1]; @@ -793,7 +794,7 @@ namespace format = format_name; } - String getLastFileName() const override + String getLastFilePath() const override { return last_read_file_path; } @@ -1112,7 +1113,8 @@ void StorageFile::setStorageMetadata(CommonArguments args) storage_metadata.setConstraints(args.constraints); storage_metadata.setComment(args.comment); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), args.getContext(), paths.empty() ? "" : paths[0], format_settings)); } @@ -1466,7 +1468,7 @@ Chunk StorageFileSource::generate() .size = current_file_size, .filename = (filename_override.has_value() ? &filename_override.value() : nullptr), .last_modified = current_file_last_modified - }); + }, getContext(), columns_description); return chunk; } @@ -2185,12 +2187,12 @@ void registerStorageFile(StorageFactory & factory) { auto type = literal->value.getType(); if (type == Field::Types::Int64) - source_fd = static_cast(literal->value.get()); + source_fd = static_cast(literal->value.safeGet()); else if (type == Field::Types::UInt64) - source_fd = static_cast(literal->value.get()); + source_fd = static_cast(literal->value.safeGet()); else if (type == Field::Types::String) StorageFile::parseFileSource( - literal->value.get(), + literal->value.safeGet(), source_path, storage_args.path_to_archive, factory_args.getLocalContext()->getSettingsRef().allow_archive_path_syntax); diff --git a/src/Storages/StorageFileCluster.cpp b/src/Storages/StorageFileCluster.cpp index d43e242f70c..82ae0b761ae 100644 --- a/src/Storages/StorageFileCluster.cpp +++ b/src/Storages/StorageFileCluster.cpp @@ -61,7 +61,7 @@ StorageFileCluster::StorageFileCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, paths.empty() ? 
"" : paths[0])); } void StorageFileCluster::updateQueryToSendIfNeeded(DB::ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const DB::ContextPtr & context) diff --git a/src/Storages/StorageFuzzJSON.cpp b/src/Storages/StorageFuzzJSON.cpp index 9950d41f1c2..fc73f246d35 100644 --- a/src/Storages/StorageFuzzJSON.cpp +++ b/src/Storages/StorageFuzzJSON.cpp @@ -419,7 +419,7 @@ void fuzzJSONObject( if (val.fixed->getType() == Field::Types::Which::String) { out << fuzzJSONStructure(config, rnd, "\""); - writeText(val.fixed->get(), out); + writeText(val.fixed->safeGet(), out); out << fuzzJSONStructure(config, rnd, "\""); } else diff --git a/src/Storages/StorageInMemoryMetadata.cpp b/src/Storages/StorageInMemoryMetadata.cpp index 2226de3e64f..4a655cac566 100644 --- a/src/Storages/StorageInMemoryMetadata.cpp +++ b/src/Storages/StorageInMemoryMetadata.cpp @@ -16,6 +16,7 @@ #include #include #include +#include namespace DB @@ -334,10 +335,17 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( NameSet required_ttl_columns; NameSet updated_ttl_columns; - auto add_dependent_columns = [&updated_columns](const Names & required_columns, auto & to_set) + auto add_dependent_columns = [&updated_columns](const Names & required_columns, auto & to_set, bool is_projection = false) { for (const auto & dependency : required_columns) { + /// useful in the case of lightweight delete with wide part and option of rebuild projection + if (is_projection && updated_columns.contains(RowExistsColumn::name)) + { + to_set.insert(required_columns.begin(), required_columns.end()); + return true; + } + if (updated_columns.contains(dependency)) { to_set.insert(required_columns.begin(), required_columns.end()); @@ -357,7 +365,7 @@ ColumnDependencies StorageInMemoryMetadata::getColumnDependencies( for (const auto & projection : getProjections()) { if (has_dependency(projection.name, ColumnDependency::PROJECTION)) - add_dependent_columns(projection.getRequiredColumns(), projections_columns); + add_dependent_columns(projection.getRequiredColumns(), projections_columns, true); } auto add_for_rows_ttl = [&](const auto & expression, auto & to_set) diff --git a/src/Storages/StorageJoin.cpp b/src/Storages/StorageJoin.cpp index 9dace45d2ac..efa15c382dd 100644 --- a/src/Storages/StorageJoin.cpp +++ b/src/Storages/StorageJoin.cpp @@ -383,10 +383,10 @@ void registerStorageJoin(StorageFactory & factory) else if (setting.name == "any_join_distinct_right_table_keys") old_any_join = setting.value; else if (setting.name == "disk") - disk_name = setting.value.get(); + disk_name = setting.value.safeGet(); else if (setting.name == "persistent") { - persistent = setting.value.get(); + persistent = setting.value.safeGet(); } else throw Exception(ErrorCodes::BAD_ARGUMENTS, "Unknown setting {} for storage {}", setting.name, args.engine_name); @@ -546,7 +546,11 @@ protected: return {}; Chunk chunk; - if (!joinDispatch(join->kind, join->strictness, join->data->maps.front(), + if (!joinDispatch( + join->kind, + join->strictness, + join->data->maps.front(), + join->table_join->getMixedJoinExpression() != nullptr, [&](auto kind, auto strictness, auto & map) { chunk = createChunk(map); })) throw Exception(ErrorCodes::LOGICAL_ERROR, "Unknown JOIN strictness"); return chunk; diff --git a/src/Storages/StorageMaterializedView.cpp b/src/Storages/StorageMaterializedView.cpp index 696136834d4..4c6c2fff209 100644 --- a/src/Storages/StorageMaterializedView.cpp +++ b/src/Storages/StorageMaterializedView.cpp @@ -203,7 +203,6 @@ 
StorageMaterializedView::StorageMaterializedView( { fixed_uuid = false; refresher = RefreshTask::create( - *this, getContext(), *query.refresh_strategy); refresh_on_start = mode < LoadingStrictnessLevel::ATTACH && !query.is_create_empty; diff --git a/src/Storages/StoragePostgreSQL.cpp b/src/Storages/StoragePostgreSQL.cpp index d3214e7ed13..e0a4af68824 100644 --- a/src/Storages/StoragePostgreSQL.cpp +++ b/src/Storages/StoragePostgreSQL.cpp @@ -294,7 +294,7 @@ public: { const auto * array_type = typeid_cast(data_type.get()); const auto & nested = array_type->getNestedType(); - const auto & array = array_field.get(); + const auto & array = array_field.safeGet(); if (!isArray(nested)) { @@ -312,7 +312,7 @@ public: if (!isArray(nested_array_type->getNestedType())) { - parseArrayContent(iter->get(), nested, ostr); + parseArrayContent(iter->safeGet(), nested, ostr); } else { diff --git a/src/Storages/StorageReplicatedMergeTree.cpp b/src/Storages/StorageReplicatedMergeTree.cpp index a3c1ab7cdff..38e94835beb 100644 --- a/src/Storages/StorageReplicatedMergeTree.cpp +++ b/src/Storages/StorageReplicatedMergeTree.cpp @@ -5704,7 +5704,8 @@ std::optional StorageReplicatedMergeTree::distributedWriteFromClu { auto connection = std::make_shared( node.host_name, node.port, query_context->getGlobalContext()->getCurrentDatabase(), - node.user, node.password, SSHKey(), /*jwt*/"", node.quota_key, node.cluster, node.cluster_secret, + node.user, node.password, node.proto_send_chunked, node.proto_recv_chunked, + SSHKey(), /*jwt*/"", node.quota_key, node.cluster, node.cluster_secret, "ParallelInsertSelectInititiator", node.compression, node.secure diff --git a/src/Storages/StorageURL.cpp b/src/Storages/StorageURL.cpp index 76c49c50f76..6442891cf23 100644 --- a/src/Storages/StorageURL.cpp +++ b/src/Storages/StorageURL.cpp @@ -36,6 +36,8 @@ #include #include #include + +#include #include #include #include @@ -90,11 +92,22 @@ static const std::vector> optional_regex_keys = { std::make_shared(R"(headers.header\[[0-9]*\].value)"), }; -static bool urlWithGlobs(const String & uri) +bool urlWithGlobs(const String & uri) { return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos) || uri.find('|') != std::string::npos; } +String getSampleURI(String uri, ContextPtr context) +{ + if (urlWithGlobs(uri)) + { + auto uris = parseRemoteDescription(uri, 0, uri.size(), ',', context->getSettingsRef().glob_expansion_max_elements); + if (!uris.empty()) + return uris[0]; + } + return uri; +} + static ConnectionTimeouts getHTTPTimeouts(ContextPtr context) { return ConnectionTimeouts::getHTTPTimeouts(context->getSettingsRef(), context->getServerSettings().keep_alive_timeout); @@ -153,7 +166,8 @@ IStorageURLBase::IStorageURLBase( storage_metadata.setConstraints(constraints_); storage_metadata.setComment(comment); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context_, getSampleURI(uri, context_), format_settings)); } @@ -414,13 +428,14 @@ Chunk StorageURLSource::generate() size_t chunk_size = 0; if (input_format) chunk_size = input_format->getApproxBytesReadForChunk(); + progress(num_rows, chunk_size ? 
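`getSampleURI`, introduced in the StorageURL hunk above, expands a globbed URL and returns the first concrete candidate so that hive-style partition virtual columns can later be inferred from a real path. The sketch below imitates that behaviour for the simple `{a,b}` case only; the real code delegates to `parseRemoteDescription` and honours the `glob_expansion_max_elements` setting.

```cpp
#include <iostream>
#include <string>

// Simplified model of urlWithGlobs()/getSampleURI() from the StorageURL hunk.
bool urlWithGlobs(const std::string & uri)
{
    return (uri.find('{') != std::string::npos && uri.find('}') != std::string::npos)
        || uri.find('|') != std::string::npos;
}

std::string getSampleURI(const std::string & uri)
{
    if (!urlWithGlobs(uri))
        return uri;

    auto open = uri.find('{');
    auto close = uri.find('}', open);
    if (open == std::string::npos || close == std::string::npos)
        return uri;

    // Take the first alternative of the first {a,b,c} group as the sample.
    std::string group = uri.substr(open + 1, close - open - 1);
    std::string first = group.substr(0, group.find(','));
    return uri.substr(0, open) + first + uri.substr(close + 1);
}

int main()
{
    // The sample path is then handed to getVirtualsForFileLikeStorage() so that
    // hive-style partition keys can be read from a concrete URL.
    std::cout << getSampleURI("https://example.com/data/{2023,2024}/part.parquet") << '\n';
    // -> https://example.com/data/2023/part.parquet
}
```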
chunk_size : chunk.bytes()); VirtualColumnUtils::addRequestedFileLikeStorageVirtualsToChunk( chunk, requested_virtual_columns, { .path = curr_uri.getPath(), - .size = current_file_size - }); + .size = current_file_size, + }, getContext(), columns_description); return chunk; } @@ -839,7 +854,7 @@ namespace format = format_name; } - String getLastFileName() const override { return current_url_option; } + String getLastFilePath() const override { return current_url_option; } bool supportsLastReadBufferRecreation() const override { return true; } @@ -1160,6 +1175,7 @@ void ReadFromURL::createIterator(const ActionsDAG::Node * predicate) void ReadFromURL::initializePipeline(QueryPipelineBuilder & pipeline, const BuildQueryPipelineSettings &) { createIterator(nullptr); + const auto & settings = context->getSettingsRef(); if (is_empty_glob) { @@ -1170,7 +1186,6 @@ void ReadFromURL::initializePipeline(QueryPipelineBuilder & pipeline, const Buil Pipes pipes; pipes.reserve(num_streams); - const auto & settings = context->getSettingsRef(); const size_t max_parsing_threads = num_streams >= settings.max_parsing_threads ? 1 : (settings.max_parsing_threads / num_streams); for (size_t i = 0; i < num_streams; ++i) diff --git a/src/Storages/StorageURL.h b/src/Storages/StorageURL.h index 9d060e9e11c..19daf843431 100644 --- a/src/Storages/StorageURL.h +++ b/src/Storages/StorageURL.h @@ -141,6 +141,9 @@ private: virtual Block getHeaderBlock(const Names & column_names, const StorageSnapshotPtr & storage_snapshot) const = 0; }; +bool urlWithGlobs(const String & uri); + +String getSampleURI(String uri, ContextPtr context); class StorageURLSource : public SourceWithKeyCondition, WithContext { diff --git a/src/Storages/StorageURLCluster.cpp b/src/Storages/StorageURLCluster.cpp index 69041482fd8..7c7a299c64e 100644 --- a/src/Storages/StorageURLCluster.cpp +++ b/src/Storages/StorageURLCluster.cpp @@ -76,7 +76,7 @@ StorageURLCluster::StorageURLCluster( storage_metadata.setConstraints(constraints_); setInMemoryMetadata(storage_metadata); - setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns())); + setVirtuals(VirtualColumnUtils::getVirtualsForFileLikeStorage(storage_metadata.getColumns(), context, getSampleURI(uri, context))); } void StorageURLCluster::updateQueryToSendIfNeeded(ASTPtr & query, const StorageSnapshotPtr & storage_snapshot, const ContextPtr & context) diff --git a/src/Storages/System/StorageSystemBuildOptions.cpp.in b/src/Storages/System/StorageSystemBuildOptions.cpp.in index a81bcb08bfc..9e5adbfe825 100644 --- a/src/Storages/System/StorageSystemBuildOptions.cpp.in +++ b/src/Storages/System/StorageSystemBuildOptions.cpp.in @@ -21,7 +21,7 @@ const char * auto_config_build[] "BUILD_COMPILE_DEFINITIONS", "@BUILD_COMPILE_DEFINITIONS@", "USE_EMBEDDED_COMPILER", "@USE_EMBEDDED_COMPILER@", "USE_GLIBC_COMPATIBILITY", "@GLIBC_COMPATIBILITY@", - "USE_JEMALLOC", "@ENABLE_JEMALLOC@", + "USE_JEMALLOC", "@USE_JEMALLOC@", "USE_ICU", "@USE_ICU@", "USE_H3", "@USE_H3@", "USE_MYSQL", "@USE_MYSQL@", @@ -36,7 +36,7 @@ const char * auto_config_build[] "USE_SSL", "@USE_SSL@", "OPENSSL_VERSION", "@OPENSSL_VERSION@", "OPENSSL_IS_BORING_SSL", "@OPENSSL_IS_BORING_SSL@", - "USE_VECTORSCAN", "@ENABLE_VECTORSCAN@", + "USE_VECTORSCAN", "@USE_VECTORSCAN@", "USE_SIMDJSON", "@USE_SIMDJSON@", "USE_ODBC", "@USE_ODBC@", "USE_GRPC", "@USE_GRPC@", @@ -62,8 +62,8 @@ const char * auto_config_build[] "USE_ARROW", "@USE_ARROW@", "USE_ORC", "@USE_ORC@", "USE_MSGPACK", "@USE_MSGPACK@", - "USE_QPL", 
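For context on the `ReadFromURL::initializePipeline` hunk above: the per-stream parsing budget (visible in the surrounding lines) divides `max_parsing_threads` across the output streams and never drops below one thread per stream. A small worked example of that arithmetic, with an assumed helper name:

```cpp
#include <cstddef>
#include <iostream>

// Per-stream parsing thread budget, mirroring the expression in the hunk above:
// every stream gets at least one parsing thread, otherwise the global budget
// is split evenly across the streams.
size_t parsingThreadsPerStream(size_t num_streams, size_t max_parsing_threads)
{
    return num_streams >= max_parsing_threads ? 1 : max_parsing_threads / num_streams;
}

int main()
{
    std::cout << parsingThreadsPerStream(4, 16) << '\n';   // 4 threads per stream
    std::cout << parsingThreadsPerStream(32, 16) << '\n';  // 1 thread per stream
}
```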
"@ENABLE_QPL@", - "USE_QAT", "@ENABLE_QATLIB@", + "USE_QPL", "@USE_QPL@", + "USE_QATLIB", "@USE_QATLIB@", "GIT_HASH", "@GIT_HASH@", "GIT_BRANCH", R"IRjaNsZIL9Yh7FQ4(@GIT_BRANCH@)IRjaNsZIL9Yh7FQ4", "GIT_DATE", "@GIT_DATE@", diff --git a/src/Storages/System/StorageSystemColumns.cpp b/src/Storages/System/StorageSystemColumns.cpp index 03c569303c5..bc13cb77d5e 100644 --- a/src/Storages/System/StorageSystemColumns.cpp +++ b/src/Storages/System/StorageSystemColumns.cpp @@ -105,8 +105,8 @@ protected: while (rows_count < max_block_size && db_table_num < total_tables) { - const std::string database_name = (*databases)[db_table_num].get(); - const std::string table_name = (*tables)[db_table_num].get(); + const std::string database_name = (*databases)[db_table_num].safeGet(); + const std::string table_name = (*tables)[db_table_num].safeGet(); ++db_table_num; ColumnsDescription columns; @@ -437,7 +437,7 @@ void ReadFromSystemColumns::initializePipeline(QueryPipelineBuilder & pipeline, for (size_t i = 0; i < num_databases; ++i) { - const std::string database_name = (*database_column)[i].get(); + const std::string database_name = (*database_column)[i].safeGet(); if (database_name.empty()) { for (auto & [table_name, table] : external_tables) diff --git a/src/Storages/System/StorageSystemContributors.generated.cpp b/src/Storages/System/StorageSystemContributors.generated.cpp index 35b9c0008c6..67dfe3bfe86 100644 --- a/src/Storages/System/StorageSystemContributors.generated.cpp +++ b/src/Storages/System/StorageSystemContributors.generated.cpp @@ -457,6 +457,7 @@ const char * auto_contributors[] { "Gleb-Tretyakov", "GoGoWen2021", "Gosha Letov", + "Graham Campbell", "Gregory", "Grigorii Sokolik", "Grigory", @@ -472,6 +473,7 @@ const char * auto_contributors[] { "Habibullah Oladepo", "HaiBo Li", "Hakob Saghatelyan", + "Halersson Paris", "Hamoon", "Han Fei", "Han Shukai", @@ -541,6 +543,7 @@ const char * auto_contributors[] { "JackyWoo", "Jacob Hayes", "Jacob Herrington", + "Jacob Reckhard", "Jai Jhala", "Jake Bamrah", "Jake Liu", @@ -661,6 +664,7 @@ const char * auto_contributors[] { "LaurieLY", "Lee sungju", "Lemore", + "Lennard Eijsackers", "Leonardo Cecchi", "Leonardo Maciel", "Leonid Krylov", @@ -922,6 +926,7 @@ const char * auto_contributors[] { "Pervakov Grigorii", "Pervakov Grigory", "Peter", + "Peter Nguyen", "Petr Vasilev", "Pham Anh Tuan", "Philip Hallstrom", @@ -981,6 +986,7 @@ const char * auto_contributors[] { "Ronald Bradford", "Rory Crispin", "Roy Bellingan", + "Ruihang Xia", "Ruslan", "Ruslan Mardugalliamov", "Ruslan Savchenko", @@ -1000,9 +1006,11 @@ const char * auto_contributors[] { "Sami Kerola", "Samuel Chou", "Samuel Colvin", + "Samuele Guerrini", "San", "Sanjam Panda", "Sariel", + "Sasha Sheikin", "Saulius Valatka", "Sean Haynes", "Sean Lafferty", @@ -1202,6 +1210,7 @@ const char * auto_contributors[] { "Vladimir Makarov", "Vladimir Mihailenco", "Vladimir Smirnov", + "Vladimir Varankin", "Vladislav Rassokhin", "Vladislav Smirnov", "Vladislav V", @@ -1275,6 +1284,7 @@ const char * auto_contributors[] { "Zhichun Wu", "Zhiguo Zhou", "Zhipeng", + "Zhukova, Maria", "Zhuo Qiu", "Zijie Lu", "Zimu Li", @@ -1502,6 +1512,7 @@ const char * auto_contributors[] { "hchen9", "hcz", "hdhoang", + "heguangnan", "heleihelei", "helifu", "hendrik-m", @@ -1572,6 +1583,7 @@ const char * auto_contributors[] { "kevinyhzou", "kgurjev", "khamadiev", + "khodyrevyurii", "kigerzhang", "kirillikoff", "kmeaw", @@ -1787,6 +1799,7 @@ const char * auto_contributors[] { "ruslandoga", "ryzuo", "s-kat", + "sakulali", 
"sanjam", "santaux", "santrancisco", @@ -1804,6 +1817,7 @@ const char * auto_contributors[] { "shabroo", "shangshujie", "shedx", + "shiyer7474", "shuai-xu", "shuchaome", "shuyang", @@ -1901,6 +1915,7 @@ const char * auto_contributors[] { "wzl", "xPoSx", "xbthink", + "xc0derx", "xiao", "xiaolei565", "xiebin", @@ -1964,6 +1979,7 @@ const char * auto_contributors[] { "zkun", "zlx19950903", "zombee0", + "zoomxi", "zvonand", "zvrr", "zvvr", diff --git a/src/Storages/System/StorageSystemKafkaConsumers.cpp b/src/Storages/System/StorageSystemKafkaConsumers.cpp index 86713632339..db6804d3ad7 100644 --- a/src/Storages/System/StorageSystemKafkaConsumers.cpp +++ b/src/Storages/System/StorageSystemKafkaConsumers.cpp @@ -42,7 +42,7 @@ ColumnsDescription StorageSystemKafkaConsumers::getColumnsDescription() {"num_rebalance_revocations", std::make_shared(), "Number of times the consumer was revoked its partitions."}, {"num_rebalance_assignments", std::make_shared(), "Number of times the consumer was assigned to Kafka cluster."}, {"is_currently_used", std::make_shared(), "The flag which shows whether the consumer is in use."}, - {"last_used", std::make_shared(6), "The last time this consumer was in use."}, + {"last_used", std::make_shared(), "The last time this consumer was in use, unix time in microseconds."}, {"rdkafka_stat", std::make_shared(), "Library internal statistic. Set statistics_interval_ms to 0 disable, default is 3000 (once in three seconds)."}, }; } @@ -79,7 +79,7 @@ void StorageSystemKafkaConsumers::fillData(MutableColumns & res_columns, Context auto & num_rebalance_revocations = assert_cast(*res_columns[index++]); auto & num_rebalance_assigments = assert_cast(*res_columns[index++]); auto & is_currently_used = assert_cast(*res_columns[index++]); - auto & last_used = assert_cast(*res_columns[index++]); + auto & last_used = assert_cast(*res_columns[index++]); auto & rdkafka_stat = assert_cast(*res_columns[index++]); const auto access = context->getAccess(); diff --git a/src/Storages/System/StorageSystemPartsBase.cpp b/src/Storages/System/StorageSystemPartsBase.cpp index 7ace8ee24aa..c87bdb6d26a 100644 --- a/src/Storages/System/StorageSystemPartsBase.cpp +++ b/src/Storages/System/StorageSystemPartsBase.cpp @@ -138,7 +138,7 @@ StoragesInfoStream::StoragesInfoStream(std::optional filter_by_datab for (size_t i = 0; i < rows; ++i) { - String database_name = (*database_column_for_filter)[i].get(); + String database_name = (*database_column_for_filter)[i].safeGet(); const DatabasePtr database = databases.at(database_name); offsets[i] = i ? offsets[i - 1] : 0; diff --git a/src/Storages/System/StorageSystemPartsBase.h b/src/Storages/System/StorageSystemPartsBase.h index 806af4a7bf8..3be73aeda17 100644 --- a/src/Storages/System/StorageSystemPartsBase.h +++ b/src/Storages/System/StorageSystemPartsBase.h @@ -52,13 +52,13 @@ public: { StoragesInfo info; - info.database = (*database_column)[next_row].get(); - info.table = (*table_column)[next_row].get(); - UUID storage_uuid = (*storage_uuid_column)[next_row].get(); + info.database = (*database_column)[next_row].safeGet(); + info.table = (*table_column)[next_row].safeGet(); + UUID storage_uuid = (*storage_uuid_column)[next_row].safeGet(); auto is_same_table = [&storage_uuid, this] (size_t row) -> bool { - return (*storage_uuid_column)[row].get() == storage_uuid; + return (*storage_uuid_column)[row].safeGet() == storage_uuid; }; /// We may have two rows per table which differ in 'active' value. @@ -66,7 +66,7 @@ public: /// must collect the inactive parts. 
Remember this fact in StoragesInfo. for (; next_row < rows && is_same_table(next_row); ++next_row) { - const auto active = (*active_column)[next_row].get(); + const auto active = (*active_column)[next_row].safeGet(); if (active == 0) info.need_inactive_parts = true; } diff --git a/src/Storages/System/StorageSystemQueryCache.cpp b/src/Storages/System/StorageSystemQueryCache.cpp index 4c54d4ae16f..b3532ba40a7 100644 --- a/src/Storages/System/StorageSystemQueryCache.cpp +++ b/src/Storages/System/StorageSystemQueryCache.cpp @@ -1,6 +1,7 @@ #include "StorageSystemQueryCache.h" #include #include +#include #include #include #include @@ -15,6 +16,7 @@ ColumnsDescription StorageSystemQueryCache::getColumnsDescription() { {"query", std::make_shared(), "Query string."}, {"result_size", std::make_shared(), "Size of the query cache entry."}, + {"tag", std::make_shared(std::make_shared()), "Tag of the query cache entry."}, {"stale", std::make_shared(), "If the query cache entry is stale."}, {"shared", std::make_shared(), "If the query cache entry is shared between multiple users."}, {"compressed", std::make_shared(), "If the query cache entry is compressed."}, @@ -51,11 +53,12 @@ void StorageSystemQueryCache::fillData(MutableColumns & res_columns, ContextPtr res_columns[0]->insert(key.query_string); /// approximates the original query string res_columns[1]->insert(QueryCache::QueryCacheEntryWeight()(*query_result)); - res_columns[2]->insert(key.expires_at < std::chrono::system_clock::now()); - res_columns[3]->insert(key.is_shared); - res_columns[4]->insert(key.is_compressed); - res_columns[5]->insert(std::chrono::system_clock::to_time_t(key.expires_at)); - res_columns[6]->insert(key.ast_hash.low64); /// query cache considers aliases (issue #56258) + res_columns[2]->insert(key.tag); + res_columns[3]->insert(key.expires_at < std::chrono::system_clock::now()); + res_columns[4]->insert(key.is_shared); + res_columns[5]->insert(key.is_compressed); + res_columns[6]->insert(std::chrono::system_clock::to_time_t(key.expires_at)); + res_columns[7]->insert(key.ast_hash.low64); /// query cache considers aliases (issue #56258) } } diff --git a/src/Storages/System/StorageSystemViewRefreshes.cpp b/src/Storages/System/StorageSystemViewRefreshes.cpp index 30539ed6b6a..061201017a7 100644 --- a/src/Storages/System/StorageSystemViewRefreshes.cpp +++ b/src/Storages/System/StorageSystemViewRefreshes.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -19,6 +20,7 @@ ColumnsDescription StorageSystemViewRefreshes::getColumnsDescription() { {"database", std::make_shared(), "The name of the database the table is in."}, {"view", std::make_shared(), "Table name."}, + {"uuid", std::make_shared(), "Table uuid (Atomic database)."}, {"status", std::make_shared(), "Current state of the refresh."}, {"last_refresh_result", std::make_shared(), "Outcome of the latest refresh attempt."}, {"last_refresh_time", std::make_shared(std::make_shared()), @@ -63,6 +65,7 @@ void StorageSystemViewRefreshes::fillData( std::size_t i = 0; res_columns[i++]->insert(refresh.view_id.getDatabaseName()); res_columns[i++]->insert(refresh.view_id.getTableName()); + res_columns[i++]->insert(refresh.view_id.uuid); res_columns[i++]->insert(toString(refresh.state)); res_columns[i++]->insert(toString(refresh.last_refresh_result)); diff --git a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp index f9e7290e514..746a6a28274 100644 --- 
a/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp +++ b/src/Storages/TimeSeries/TimeSeriesDefinitionNormalizer.cpp @@ -227,8 +227,11 @@ void TimeSeriesDefinitionNormalizer::addMissingColumns(ASTCreateQuery & create) /// We use Nullable(DateTime64(3)) as the default type of the `min_time` and `max_time` columns. /// It's nullable because it allows the aggregation (see aggregate_min_time_and_max_time) work correctly even /// for rows in the "tags" table which doesn't have `min_time` and `max_time` (because they have no matching rows in the "data" table). - make_new_column(TimeSeriesColumnNames::MinTime, make_nullable(timestamp_type)); - make_new_column(TimeSeriesColumnNames::MaxTime, make_nullable(timestamp_type)); + + if (!is_next_column_named(TimeSeriesColumnNames::MinTime)) + make_new_column(TimeSeriesColumnNames::MinTime, make_nullable(timestamp_type)); + if (!is_next_column_named(TimeSeriesColumnNames::MaxTime)) + make_new_column(TimeSeriesColumnNames::MaxTime, make_nullable(timestamp_type)); } /// Add missing columns for the "metrics" table. diff --git a/src/Storages/VirtualColumnUtils.cpp b/src/Storages/VirtualColumnUtils.cpp index 19694830c4e..d932f5cc469 100644 --- a/src/Storages/VirtualColumnUtils.cpp +++ b/src/Storages/VirtualColumnUtils.cpp @@ -1,4 +1,3 @@ -#include #include #include #include @@ -37,11 +36,17 @@ #include #include +#include #include +#include +#include +#include +#include #include "Functions/FunctionsLogical.h" #include "Functions/IFunction.h" #include "Functions/IFunctionAdaptors.h" #include "Functions/indexHint.h" +#include #include #include #include @@ -51,6 +56,11 @@ namespace DB { +namespace ErrorCodes +{ + extern const int INCORRECT_DATA; +} + namespace VirtualColumnUtils { @@ -119,7 +129,29 @@ NameSet getVirtualNamesForFileLikeStorage() return {"_path", "_file", "_size", "_time", "_etag"}; } -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns) +std::unordered_map parseHivePartitioningKeysAndValues(const String & path, const ColumnsDescription & storage_columns) +{ + std::string pattern = "([^/]+)=([^/]+)/"; + re2::StringPiece input_piece(path); + + std::unordered_map key_values; + std::string key, value; + std::unordered_set used_keys; + while (RE2::FindAndConsume(&input_piece, pattern, &key, &value)) + { + if (used_keys.contains(key)) + throw Exception(ErrorCodes::INCORRECT_DATA, "Path '{}' to file with enabled hive-style partitioning contains duplicated partition key {}, only unique keys are allowed", path, key); + used_keys.insert(key); + + auto col_name = "_" + key; + while (storage_columns.has(col_name)) + col_name = "_" + col_name; + key_values[col_name] = value; + } + return key_values; +} + +VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns, const ContextPtr & context, const std::string & path, std::optional format_settings_) { VirtualColumnsDescription desc; @@ -137,6 +169,22 @@ VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription add_virtual("_time", makeNullable(std::make_shared())); add_virtual("_etag", std::make_shared(std::make_shared())); + if (context->getSettingsRef().use_hive_partitioning) + { + auto map = parseHivePartitioningKeysAndValues(path, storage_columns); + auto format_settings = format_settings_ ? 
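`parseHivePartitioningKeysAndValues` in the VirtualColumnUtils hunk above extracts `key=value` path segments, rejects duplicated keys, and prefixes each key with `_` (adding further underscores while the name collides with an existing storage column) to form virtual column names. Below is a standalone sketch of the same parsing idea using `std::regex` instead of RE2; it is an illustration rather than the exact ClickHouse code.

```cpp
#include <iostream>
#include <map>
#include <regex>
#include <set>
#include <stdexcept>
#include <string>

// Standalone sketch of parseHivePartitioningKeysAndValues() from the hunk above.
// Keys become "_key" virtual columns; an extra underscore is prepended while the
// name collides with a real storage column.
std::map<std::string, std::string> parseHivePartitionKeys(
    const std::string & path, const std::set<std::string> & storage_columns)
{
    static const std::regex pattern("([^/]+)=([^/]+)/");
    std::map<std::string, std::string> key_values;
    std::set<std::string> used_keys;

    for (auto it = std::sregex_iterator(path.begin(), path.end(), pattern);
         it != std::sregex_iterator(); ++it)
    {
        std::string key = (*it)[1], value = (*it)[2];
        if (!used_keys.insert(key).second)
            throw std::runtime_error("duplicated partition key: " + key);

        std::string col_name = "_" + key;
        while (storage_columns.count(col_name))
            col_name = "_" + col_name;
        key_values[col_name] = value;
    }
    return key_values;
}

int main()
{
    auto kv = parseHivePartitionKeys("/warehouse/year=2024/country=US/data.parquet", {"_year"});
    for (const auto & [name, value] : kv)
        std::cout << name << " = " << value << '\n';
    // __year = 2024   (collides with the existing "_year" column, so one more underscore)
    // _country = US
}
```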
*format_settings_ : getFormatSettings(context); + for (auto & item : map) + { + auto type = tryInferDataTypeByEscapingRule(item.second, format_settings, FormatSettings::EscapingRule::Raw); + if (type == nullptr) + type = std::make_shared(); + if (type->canBeInsideLowCardinality()) + add_virtual(item.first, std::make_shared(type)); + else + add_virtual(item.first, type); + } + } + return desc; } @@ -196,8 +244,12 @@ ColumnPtr getFilterByPathAndFileIndexes(const std::vector & paths, const void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values) + VirtualsForFileLikeStorage virtual_values, ContextPtr context, const ColumnsDescription & columns) { + std::unordered_map hive_map; + if (context->getSettingsRef().use_hive_partitioning) + hive_map = parseHivePartitioningKeysAndValues(virtual_values.path, columns); + for (const auto & virtual_column : requested_virtual_columns) { if (virtual_column.name == "_path") @@ -231,6 +283,10 @@ void addRequestedFileLikeStorageVirtualsToChunk( else chunk.addColumn(virtual_column.type->createColumnConstWithDefaultValue(chunk.getNumRows())->convertToFullColumnIfConst()); } + else if (auto it = hive_map.find(virtual_column.getNameInStorage()); it != hive_map.end()) + { + chunk.addColumn(virtual_column.type->createColumnConst(chunk.getNumRows(), convertFieldToType(Field(it->second), *virtual_column.type))->convertToFullColumnIfConst()); + } else if (virtual_column.name == "_etag") { if (virtual_values.etag) @@ -281,9 +337,7 @@ bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node) } static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( - const ActionsDAG::Node * node, - const Block * allowed_inputs, - ActionsDAG::Nodes & additional_nodes) + const ActionsDAG::Node * node, const Block * allowed_inputs, ActionsDAG::Nodes & additional_nodes, bool allow_partial_result) { if (node->type == ActionsDAG::ActionType::FUNCTION) { @@ -292,8 +346,15 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto & node_copy = additional_nodes.emplace_back(*node); node_copy.children.clear(); for (const auto * child : node->children) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes)) + if (const auto * child_copy + = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_partial_result)) node_copy.children.push_back(child_copy); + /// Expression like (now_allowed AND allowed) is not allowed if allow_partial_result = true. This is important for + /// trivial count optimization, otherwise we can get incorrect results. For example, if the query is + /// SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1, we cannot apply + /// trivial count. + else if (!allow_partial_result) + return nullptr; if (node_copy.children.empty()) return nullptr; @@ -301,7 +362,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( if (node_copy.children.size() == 1) { const ActionsDAG::Node * res = node_copy.children.front(); - /// Expression like (not_allowed AND 256) can't be resuced to (and(256)) because AND requires + /// Expression like (not_allowed AND 256) can't be reduced to (and(256)) because AND requires /// at least two arguments; also it can't be reduced to (256) because result type is different. 
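The `allow_partial_result` flag added in the hunk above protects the trivial count optimization: when the flag is false, a conjunct that cannot be evaluated from the allowed inputs makes the whole split fail instead of being silently dropped. The toy program below shows, on a hypothetical single block, why counting with only the evaluable half of `_partition_id = '0' AND rowNumberInBlock() = 1` would overcount; it is an illustration of the comment in the hunk, not ClickHouse code.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Rows of one block with their partition ids. The full predicate also requires
// rowNumberInBlock() = 1, which cannot be evaluated from part metadata alone.
int main()
{
    std::vector<std::string> partition_of_row = {"0", "0", "0", "1"};

    size_t full_predicate_count = 0;     // _partition_id = '0' AND rowNumberInBlock() = 1
    size_t partial_predicate_count = 0;  // _partition_id = '0' only

    for (size_t row = 0; row < partition_of_row.size(); ++row)
    {
        bool in_partition = partition_of_row[row] == "0";
        bool is_row_one = (row == 1);    // rowNumberInBlock() = 1
        if (in_partition && is_row_one)
            ++full_predicate_count;
        if (in_partition)
            ++partial_predicate_count;
    }

    // Trivial count based on the partial predicate would report 3 instead of 1.
    std::cout << full_predicate_count << " vs " << partial_predicate_count << '\n';
}
```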
if (!res->result_type->equals(*node->result_type)) { @@ -319,7 +380,7 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( { auto & node_copy = additional_nodes.emplace_back(*node); for (auto & child : node_copy.children) - if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes); !child) + if (child = splitFilterNodeForAllowedInputs(child, allowed_inputs, additional_nodes, allow_partial_result); !child) return nullptr; return &node_copy; @@ -333,7 +394,8 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( auto index_hint_dag = index_hint->getActions().clone(); ActionsDAG::NodeRawConstPtrs atoms; for (const auto & output : index_hint_dag.getOutputs()) - if (const auto * child_copy = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes)) + if (const auto * child_copy + = splitFilterNodeForAllowedInputs(output, allowed_inputs, additional_nodes, allow_partial_result)) atoms.push_back(child_copy); if (!atoms.empty()) @@ -367,22 +429,24 @@ static const ActionsDAG::Node * splitFilterNodeForAllowedInputs( return node; } -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs) +std::optional +splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_partial_result) { if (!predicate) return {}; ActionsDAG::Nodes additional_nodes; - const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes); + const auto * res = splitFilterNodeForAllowedInputs(predicate, allowed_inputs, additional_nodes, allow_partial_result); if (!res) return {}; return ActionsDAG::cloneSubDAG({res}, true); } -void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context) +void filterBlockWithPredicate( + const ActionsDAG::Node * predicate, Block & block, ContextPtr context, bool allow_filtering_with_partial_predicate) { - auto dag = splitFilterDagForAllowedInputs(predicate, &block); + auto dag = splitFilterDagForAllowedInputs(predicate, &block, /*allow_partial_result=*/allow_filtering_with_partial_predicate); if (dag) filterBlockWithExpression(buildFilterExpression(std::move(*dag), context), block); } diff --git a/src/Storages/VirtualColumnUtils.h b/src/Storages/VirtualColumnUtils.h index 1ed369300ff..23e16871798 100644 --- a/src/Storages/VirtualColumnUtils.h +++ b/src/Storages/VirtualColumnUtils.h @@ -5,6 +5,7 @@ #include #include #include +#include #include @@ -26,9 +27,13 @@ namespace VirtualColumnUtils /// /// Otherwise calling filter*() outside applyFilters() will throw "Not-ready Set is passed" /// if there are subqueries. +/// +/// Similar to filterBlockWithExpression(buildFilterExpression(splitFilterDagForAllowedInputs(...)))./// Similar to filterBlockWithQuery, but uses ActionsDAG as a predicate. +/// Basically it is filterBlockWithDAG(splitFilterDagForAllowedInputs). +/// If allow_filtering_with_partial_predicate is true, then the filtering will be done even if some part of the predicate +/// cannot be evaluated using the columns from the block. +void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context, bool allow_filtering_with_partial_predicate = true); -/// Similar to filterBlockWithExpression(buildFilterExpression(splitFilterDagForAllowedInputs(...))). -void filterBlockWithPredicate(const ActionsDAG::Node * predicate, Block & block, ContextPtr context); /// Just filters block. 
Block should contain all the required columns. ExpressionActionsPtr buildFilterExpression(ActionsDAG dag, ContextPtr context); @@ -41,7 +46,15 @@ void buildSetsForDAG(const ActionsDAG & dag, const ContextPtr & context); bool isDeterministicInScopeOfQuery(const ActionsDAG::Node * node); /// Extract a part of predicate that can be evaluated using only columns from input_names. -std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs); +/// When allow_partial_result is false, then the result will be empty if any part of if cannot be evaluated deterministically +/// on the given inputs. +/// allow_partial_result must be false when we are going to use the result to filter parts in +/// MergeTreeData::totalRowsByPartitionPredicateImp. For example, if the query is +/// `SELECT count() FROM table WHERE _partition_id = '0' AND rowNumberInBlock() = 1` +/// The predicate will be `_partition_id = '0' AND rowNumberInBlock() = 1`, and `rowNumberInBlock()` is +/// non-deterministic. If we still extract the part `_partition_id = '0'` for filtering parts, then trivial +/// count optimization will be mistakenly applied to the query. +std::optional splitFilterDagForAllowedInputs(const ActionsDAG::Node * predicate, const Block * allowed_inputs, bool allow_partial_result = true); /// Extract from the input stream a set of `name` column values template @@ -51,12 +64,16 @@ auto extractSingleValueFromBlock(const Block & block, const String & name) const ColumnWithTypeAndName & data = block.getByName(name); size_t rows = block.rows(); for (size_t i = 0; i < rows; ++i) - res.insert((*data.column)[i].get()); + res.insert((*data.column)[i].safeGet()); return res; } NameSet getVirtualNamesForFileLikeStorage(); -VirtualColumnsDescription getVirtualsForFileLikeStorage(const ColumnsDescription & storage_columns); +VirtualColumnsDescription getVirtualsForFileLikeStorage( + const ColumnsDescription & storage_columns, + const ContextPtr & context, + const std::string & sample_path = "", + std::optional format_settings_ = std::nullopt); std::optional createPathAndFileFilterDAG(const ActionsDAG::Node * predicate, const NamesAndTypesList & virtual_columns); @@ -88,7 +105,7 @@ struct VirtualsForFileLikeStorage void addRequestedFileLikeStorageVirtualsToChunk( Chunk & chunk, const NamesAndTypesList & requested_virtual_columns, - VirtualsForFileLikeStorage virtual_values); + VirtualsForFileLikeStorage virtual_values, ContextPtr context, const ColumnsDescription & columns); } } diff --git a/src/Storages/fuzzers/CMakeLists.txt b/src/Storages/fuzzers/CMakeLists.txt index ec56b853666..f67552716a2 100644 --- a/src/Storages/fuzzers/CMakeLists.txt +++ b/src/Storages/fuzzers/CMakeLists.txt @@ -4,4 +4,4 @@ clickhouse_add_executable (mergetree_checksum_fuzzer mergetree_checksum_fuzzer.c target_link_libraries (mergetree_checksum_fuzzer PRIVATE dbms clickhouse_functions) clickhouse_add_executable (columns_description_fuzzer columns_description_fuzzer.cpp) -target_link_libraries (columns_description_fuzzer PRIVATE dbms clickhouse_functions) +target_link_libraries (columns_description_fuzzer PRIVATE clickhouse_functions) diff --git a/src/Storages/fuzzers/columns_description_fuzzer.cpp b/src/Storages/fuzzers/columns_description_fuzzer.cpp index 469bfe2fb38..e39afccd1f9 100644 --- a/src/Storages/fuzzers/columns_description_fuzzer.cpp +++ b/src/Storages/fuzzers/columns_description_fuzzer.cpp @@ -1,4 +1,5 @@ #include +#include #include diff --git a/src/Storages/getStructureOfRemoteTable.cpp 
b/src/Storages/getStructureOfRemoteTable.cpp index 9d23f132759..1408e120bc5 100644 --- a/src/Storages/getStructureOfRemoteTable.cpp +++ b/src/Storages/getStructureOfRemoteTable.cpp @@ -102,16 +102,16 @@ ColumnsDescription getStructureOfRemoteTableInShard( { ColumnDescription column; - column.name = (*name)[i].get(); + column.name = (*name)[i].safeGet(); - String data_type_name = (*type)[i].get(); + String data_type_name = (*type)[i].safeGet(); column.type = data_type_factory.get(data_type_name); - String kind_name = (*default_kind)[i].get(); + String kind_name = (*default_kind)[i].safeGet(); if (!kind_name.empty()) { column.default_desc.kind = columnDefaultKindFromString(kind_name); - String expr_str = (*default_expr)[i].get(); + String expr_str = (*default_expr)[i].safeGet(); column.default_desc.expression = parseQuery( expr_parser, expr_str.data(), expr_str.data() + expr_str.size(), "default expression", 0, settings.max_parser_depth, settings.max_parser_backtracks); @@ -207,8 +207,8 @@ ColumnsDescriptionByShardNum getExtendedObjectsOfRemoteTables( size_t size = name_col.size(); for (size_t i = 0; i < size; ++i) { - auto name = name_col[i].get(); - auto type_name = type_col[i].get(); + auto name = name_col[i].safeGet(); + auto type_name = type_col[i].safeGet(); auto storage_column = storage_columns.tryGetPhysical(name); if (storage_column && storage_column->type->hasDynamicSubcolumnsDeprecated()) diff --git a/src/TableFunctions/TableFunctionExplain.cpp b/src/TableFunctions/TableFunctionExplain.cpp index 552b9fde986..69d24c879bd 100644 --- a/src/TableFunctions/TableFunctionExplain.cpp +++ b/src/TableFunctions/TableFunctionExplain.cpp @@ -83,7 +83,7 @@ void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPt "Table function '{}' requires a String argument for EXPLAIN kind, got '{}'", getName(), queryToString(kind_arg)); - ASTExplainQuery::ExplainKind kind = ASTExplainQuery::fromString(kind_literal->value.get()); + ASTExplainQuery::ExplainKind kind = ASTExplainQuery::fromString(kind_literal->value.safeGet()); auto explain_query = std::make_shared(kind); const auto * settings_arg = function->arguments->children[1]->as(); @@ -92,7 +92,7 @@ void TableFunctionExplain::parseArguments(const ASTPtr & ast_function, ContextPt "Table function '{}' requires a serialized string settings argument, got '{}'", getName(), queryToString(function->arguments->children[1])); - const auto & settings_str = settings_arg->value.get(); + const auto & settings_str = settings_arg->value.safeGet(); if (!settings_str.empty()) { const Settings & settings = context->getSettingsRef(); diff --git a/src/TableFunctions/TableFunctionFile.cpp b/src/TableFunctions/TableFunctionFile.cpp index af327cfe54e..5cd249f000d 100644 --- a/src/TableFunctions/TableFunctionFile.cpp +++ b/src/TableFunctions/TableFunctionFile.cpp @@ -48,7 +48,7 @@ void TableFunctionFile::parseFirstArguments(const ASTPtr & arg, const ContextPtr else if (type == Field::Types::Int64 || type == Field::Types::UInt64) { fd = static_cast( - (type == Field::Types::Int64) ? literal->value.get() : literal->value.get()); + (type == Field::Types::Int64) ? 
literal->value.safeGet() : literal->value.safeGet()); if (fd < 0) throw Exception(ErrorCodes::BAD_ARGUMENTS, "File descriptor must be non-negative"); } diff --git a/src/TableFunctions/TableFunctionMergeTreeIndex.cpp b/src/TableFunctions/TableFunctionMergeTreeIndex.cpp index 06a48f0e25f..27ed50fb711 100644 --- a/src/TableFunctions/TableFunctionMergeTreeIndex.cpp +++ b/src/TableFunctions/TableFunctionMergeTreeIndex.cpp @@ -76,9 +76,9 @@ void TableFunctionMergeTreeIndex::parseArguments(const ASTPtr & ast_function, Co "Table function '{}' expected bool flag for 'with_marks' argument", getName()); if (value.getType() == Field::Types::Bool) - with_marks = value.get(); + with_marks = value.safeGet(); else - with_marks = value.get(); + with_marks = value.safeGet(); } if (!params.empty()) diff --git a/src/TableFunctions/TableFunctionObjectStorage.cpp b/src/TableFunctions/TableFunctionObjectStorage.cpp index 550d9cc799b..39392a4c44c 100644 --- a/src/TableFunctions/TableFunctionObjectStorage.cpp +++ b/src/TableFunctions/TableFunctionObjectStorage.cpp @@ -84,7 +84,8 @@ ColumnsDescription TableFunctionObjectStorage< context->checkAccess(getSourceAccessType()); ColumnsDescription columns; auto storage = getObjectStorage(context, !is_insert_query); - resolveSchemaAndFormat(columns, configuration->format, storage, configuration, std::nullopt, context); + std::string sample_path; + resolveSchemaAndFormat(columns, configuration->format, storage, configuration, std::nullopt, sample_path, context); return columns; } else diff --git a/src/configure_config.cmake b/src/configure_config.cmake index d22bf674df4..5a1aa179e01 100644 --- a/src/configure_config.cmake +++ b/src/configure_config.cmake @@ -135,6 +135,12 @@ endif() if (TARGET ch_contrib::vectorscan) set(USE_VECTORSCAN 1) endif() +if (TARGET ch_contrib::qpl) + set(USE_QPL 1) +endif() +if (TARGET ch_contrib::qatlib) + set(USE_QATLIB 1) +endif() if (TARGET ch_contrib::avrocpp) set(USE_AVRO 1) endif() @@ -158,11 +164,14 @@ endif() if (TARGET ch_contrib::bcrypt) set(USE_BCRYPT 1) endif() +if (TARGET ch_contrib::usearch) + set(USE_USEARCH 1) +endif() if (TARGET ch_contrib::ssh) set(USE_SSH 1) endif() -if (TARGET ch_contrib::fiu) - set(FIU_ENABLE 1) +if (TARGET ch_contrib::libfiu) + set(USE_LIBFIU 1) endif() if (TARGET ch_contrib::libarchive) set(USE_LIBARCHIVE 1) diff --git a/tests/ci/changelog.py b/tests/ci/changelog.py index 39e426945d3..554ba339892 100755 --- a/tests/ci/changelog.py +++ b/tests/ci/changelog.py @@ -19,7 +19,6 @@ from env_helper import TEMP_PATH from git_helper import git_runner, is_shallow from github_helper import GitHub, PullRequest, PullRequests, Repository from s3_helper import S3Helper -from get_robot_token import get_best_robot_token from ci_utils import Shell from version_helper import ( FILE_WITH_VERSION_PATH, @@ -115,7 +114,6 @@ def get_descriptions(prs: PullRequests) -> Dict[str, List[Description]]: # pylint: enable=protected-access if repo_name not in repos: repos[repo_name] = pr.base.repo - in_changelog = False merge_commit = pr.merge_commit_sha if merge_commit is None: logging.warning("PR %s does not have merge-commit, skipping", pr.number) @@ -173,7 +171,6 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--gh-user-or-token", help="user name or GH token to authenticate", - default=get_best_robot_token(), ) parser.add_argument( "--gh-password", @@ -291,7 +288,7 @@ def generate_description(item: PullRequest, repo: Repository) -> Optional[Descri # Normalize bug fixes if ( re.match( - r"(?i)bug\Wfix", + 
r".*(?i)bug\Wfix", category, ) # Map "Critical Bug Fix" to "Bug fix" category for changelog diff --git a/tests/ci/ci.py b/tests/ci/ci.py index 8d0414ce7a8..49b597333dc 100644 --- a/tests/ci/ci.py +++ b/tests/ci/ci.py @@ -985,6 +985,7 @@ def _run_test(job_name: str, run_command: str) -> int: else: print("Use run command from the workflow") env["CHECK_NAME"] = job_name + env["MAX_RUN_TIME"] = str(timeout or 0) print(f"Going to start run command [{run_command}]") stopwatch = Stopwatch() job_log = Path(TEMP_PATH) / "job_log.txt" diff --git a/tests/ci/ci_utils.py b/tests/ci/ci_utils.py index d807f5be09f..b8778e0cc50 100644 --- a/tests/ci/ci_utils.py +++ b/tests/ci/ci_utils.py @@ -167,6 +167,11 @@ class GH: latest_branch = Shell.get_output( 'gh pr list --label release --repo ClickHouse/ClickHouse --search "sort:created" -L1 --json headRefName' ) + if latest_branch: + latest_branch = json.loads(latest_branch)[0]["headRefName"] + print( + f"Latest branch [{latest_branch}], release branch [{branch}], release latest [{latest_branch == branch}]" + ) return latest_branch == branch diff --git a/tests/ci/create_release.py b/tests/ci/create_release.py index 27eba273ce0..68268b033fe 100755 --- a/tests/ci/create_release.py +++ b/tests/ci/create_release.py @@ -61,6 +61,7 @@ class ReleaseContextManager: # create initial release info self.release_info = ReleaseInfo( release_branch="NA", + release_type="NA", commit_sha=args.ref, release_tag="NA", version="NA", @@ -93,6 +94,7 @@ class ReleaseContextManager: @dataclasses.dataclass class ReleaseInfo: version: str + release_type: str release_tag: str release_branch: str commit_sha: str @@ -131,7 +133,7 @@ class ReleaseInfo: return self def prepare( - self, commit_ref: str, release_type: str, skip_tag_check: bool + self, commit_ref: str, release_type: str, _skip_tag_check: bool ) -> "ReleaseInfo": version = None release_branch = None @@ -143,17 +145,18 @@ class ReleaseInfo: assert release_type in ("patch", "new") if release_type == "new": # check commit_ref is right and on a right branch - Shell.check( - f"git merge-base --is-ancestor {commit_ref} origin/master", - strict=True, - verbose=True, - ) + if commit_ref != "master": + Shell.check( + f"git merge-base --is-ancestor {commit_ref} origin/master", + strict=True, + verbose=True, + ) with checkout(commit_ref): commit_sha = Shell.get_output_or_raise(f"git rev-list -n1 {commit_ref}") # Git() must be inside "with checkout" contextmanager git = Git() version = get_version_from_repo(git=git) - release_branch = "master" + release_branch = f"{version.major}.{version.minor}" expected_prev_tag = f"v{version.major}.{version.minor}.1.1-new" version.bump().with_description(VersionType.NEW) assert ( @@ -204,10 +207,11 @@ class ReleaseInfo: expected_tag_prefix ) and git.latest_tag.endswith(expected_tag_suffix): pass - elif not skip_tag_check: - assert ( - False - ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" + # TODO: uncomment and check with dry-run + # elif not skip_tag_check: + # assert ( + # False + # ), f"BUG: Unexpected latest tag [{git.latest_tag}] expected [{expected_tag_prefix}*{expected_tag_suffix}]. Already Released?" 
previous_release_sha = Shell.get_output_or_raise( f"git rev-list -n1 {previous_release_tag}" @@ -238,6 +242,7 @@ class ReleaseInfo: self.release_progress = ReleaseProgress.STARTED self.progress_status = ReleaseProgressDescription.OK self.latest = latest_release + self.release_type = release_type return self def push_release_tag(self, dry_run: bool) -> None: @@ -262,16 +267,15 @@ class ReleaseInfo: @staticmethod def _create_gh_label(label: str, color_hex: str, dry_run: bool) -> None: cmd = f"gh api repos/{CI.Envs.GITHUB_REPOSITORY}/labels -f name={label} -f color={color_hex}" - Shell.check(cmd, dry_run=dry_run, strict=True) + res = Shell.check(cmd, dry_run=dry_run, verbose=True) + if not res: + # not a critical error - do not fail. branch might be created already (recovery case) + print("WARNING: failed to create backport labels for the new branch") def push_new_release_branch(self, dry_run: bool) -> None: - assert ( - self.release_branch == "master" - ), "New release branch can be created only for release type [new]" git = Git() version = get_version_from_repo(git=git) - new_release_branch = f"{version.major}.{version.minor}" - stable_release_type = version.get_stable_release_type() + new_release_branch = self.release_branch version_after_release = copy(version) version_after_release.bump() assert ( @@ -285,11 +289,8 @@ class ReleaseInfo: print( f"Create and push new release branch [{new_release_branch}], commit [{self.commit_sha}]" ) - with checkout(self.release_branch): + with checkout("master"): with checkout_new(new_release_branch): - pr_labels = f"--label {CI.Labels.RELEASE}" - if stable_release_type == VersionType.LTS: - pr_labels += f" --label {CI.Labels.RELEASE_LTS}" cmd_push_branch = ( f"{GIT_PREFIX} push --set-upstream origin {new_release_branch}" ) @@ -302,67 +303,108 @@ class ReleaseInfo: ReleaseInfo._create_gh_label( f"v{new_release_branch}-affected", "c2bfff", dry_run=dry_run ) - Shell.check( - f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {new_release_branch}' - --head {new_release_branch} {pr_labels} - --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.' 
- """, - dry_run=dry_run, - strict=True, - verbose=True, - ) def get_version_bump_branch(self): return f"bump_version_{self.version}" def update_version_and_contributors_list(self, dry_run: bool) -> None: - # Bump version, update contributors list, create PR - branch_upd_version_contributors = self.get_version_bump_branch() + # Bump version, update contributors list, create on release branch with checkout(self.commit_sha): git = Git() version = get_version_from_repo(git=git) - if self.release_branch == "master": + if self.release_type == "patch": + assert ( + version.string == self.version + ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" + version.bump_patch() + else: + version.reset_tweak() + version.with_description(version.get_stable_release_type()) + + with checkout(self.release_branch): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push" + Shell.check( + cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) + if dry_run: + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + + # TODO: move to new GH step? + if self.release_type == "new": + print("Update version on master branch") + branch_upd_version_contributors = self.get_version_bump_branch() + with checkout(self.commit_sha): + git = Git() + version = get_version_from_repo(git=git) version.bump() version.with_description(VersionType.TESTING) - else: - version.with_description(version.get_stable_release_type()) - assert ( - version.string == self.version - ), f"BUG: version in release info does not match version in git commit, expected [{self.version}], got [{version.string}]" - with checkout(self.release_branch): - with checkout_new(branch_upd_version_contributors): - update_cmake_version(version) - update_contributors(raise_error=True) - cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" - cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" - actor = os.getenv("GITHUB_ACTOR", "") or "me" - body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" - cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base {self.release_branch} --body \"{body}\" --assignee {actor}" + with checkout("master"): + with checkout_new(branch_upd_version_contributors): + update_cmake_version(version) + update_contributors(raise_error=True) + cmd_commit_version_upd = f"{GIT_PREFIX} commit '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}' -m 'Update autogenerated version to {self.version} and contributors'" + cmd_push_branch = f"{GIT_PREFIX} push --set-upstream origin {branch_upd_version_contributors}" + actor = os.getenv("GITHUB_ACTOR", "") or "me" + body = f"Automatic version bump after release {self.release_tag}\n### Changelog category (leave one):\n- Not for changelog (changelog entry is not required)\n" + 
cmd_create_pr = f"gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Update version after release' --head {branch_upd_version_contributors} --base master --body \"{body}\" --assignee {actor}" + Shell.check( + cmd_commit_version_upd, + strict=True, + dry_run=dry_run, + verbose=True, + ) + Shell.check( + cmd_push_branch, strict=True, dry_run=dry_run, verbose=True + ) + Shell.check( + cmd_create_pr, strict=True, dry_run=dry_run, verbose=True + ) + if dry_run: + Shell.check( + f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + Shell.check( + f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", + verbose=True, + ) + self.version_bump_pr = "dry-run" + else: + self.version_bump_pr = GH.get_pr_url_by_branch( + branch=branch_upd_version_contributors + ) + + # TODO: move to new GH step? + print("Create Release PR") + with checkout(self.release_branch): + pr_labels = f"--label {CI.Labels.RELEASE}" + if version.get_stable_release_type() == VersionType.LTS: + pr_labels += f" --label {CI.Labels.RELEASE_LTS}" Shell.check( - cmd_commit_version_upd, strict=True, dry_run=dry_run, verbose=True + f"""gh pr create --repo {CI.Envs.GITHUB_REPOSITORY} --title 'Release pull request for branch {self.release_branch}' \ + --head {self.release_branch} {pr_labels} \ + --body 'This PullRequest is a part of ClickHouse release cycle. It is used by CI system only. Do not perform any changes with it.'""", + dry_run=dry_run, + strict=True, + verbose=True, ) - Shell.check(cmd_push_branch, strict=True, dry_run=dry_run, verbose=True) - Shell.check(cmd_create_pr, strict=True, dry_run=dry_run, verbose=True) - if dry_run: - Shell.check( - f"{GIT_PREFIX} diff '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", - verbose=True, - ) - Shell.check( - f"{GIT_PREFIX} checkout '{CMAKE_PATH}' '{CONTRIBUTORS_PATH}'", - verbose=True, - ) - self.version_bump_pr = "dry-run" - else: - self.version_bump_pr = GH.get_pr_url_by_branch( - branch=branch_upd_version_contributors - ) def get_change_log_branch(self): return f"auto/{self.release_tag}" def update_release_info(self, dry_run: bool) -> "ReleaseInfo": - if self.release_branch != "master": + if self.release_type == "patch": if not self.changelog_pr: branch = self.get_change_log_branch() if not dry_run: @@ -371,21 +413,22 @@ class ReleaseInfo: url = "dry-run" print(f"ChangeLog PR url [{url}]") self.changelog_pr = url - - if not self.version_bump_pr: - branch = self.get_version_bump_branch() - if not dry_run: - url = GH.get_pr_url_by_branch(branch=branch) - else: - url = "dry-run" - print(f"Version bump PR url [{url}]") - self.version_bump_pr = url - - self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" - print(f"Release url [{self.release_url}]") - self.docker = f"docker run --rm clickhouse/clickhouse:{self.version} clickhouse --version" + else: + # new release branch - find version bump pr on a master branch + branch = self.get_version_bump_branch() + if not dry_run: + url = GH.get_pr_url_by_branch(branch=branch) + else: + url = "dry-run" + print(f"Version bump PR url [{url}]") + self.version_bump_pr = url + + self.release_url = f"https://github.com/{CI.Envs.GITHUB_REPOSITORY}/releases/tag/{self.release_tag}" + print(f"Release url [{self.release_url}]") + self.dump() + return self def create_gh_release(self, packages_files: List[str], dry_run: bool) -> None: @@ -410,35 +453,40 @@ class ReleaseInfo: def merge_prs(self, dry_run: bool) -> None: repo = CI.Envs.GITHUB_REPOSITORY - assert self.version_bump_pr - if 
dry_run: - version_bump_pr_num = 12345 - else: - version_bump_pr_num = int(self.version_bump_pr.split("/")[-1]) - print("Merging Version bump PR") - res_1 = Shell.check( - f"gh pr merge {version_bump_pr_num} --repo {repo} --merge --auto", - verbose=True, - dry_run=dry_run, - ) - - res_2 = True - if not self.release_tag.endswith("-new"): + if self.release_type == "patch": assert self.changelog_pr print("Merging ChangeLog PR") if dry_run: changelog_pr_num = 23456 else: changelog_pr_num = int(self.changelog_pr.split("/")[-1]) - res_2 = Shell.check( + res = Shell.check( f"gh pr merge {changelog_pr_num} --repo {repo} --merge --auto", verbose=True, dry_run=dry_run, ) else: - assert not self.changelog_pr + if not dry_run: + assert not self.changelog_pr + res = True - self.prs_merged = res_1 and res_2 + if self.release_type == "new": + assert self.version_bump_pr + print("Merging Version Bump PR") + if dry_run: + version_bump_pr = 23456 + else: + version_bump_pr = int(self.version_bump_pr.split("/")[-1]) + res = res and Shell.check( + f"gh pr merge {version_bump_pr} --repo {repo} --merge --auto", + verbose=True, + dry_run=dry_run, + ) + else: + if not dry_run: + assert not self.version_bump_pr + + self.prs_merged = res class RepoTypes: @@ -759,7 +807,7 @@ if __name__ == "__main__": release_info.prepare( commit_ref=args.ref, release_type=args.release_type, - skip_tag_check=args.skip_tag_check, + _skip_tag_check=args.skip_tag_check, ) if args.download_packages: diff --git a/tests/ci/docker_server.py b/tests/ci/docker_server.py index 3251ec5644e..34439c19f0a 100644 --- a/tests/ci/docker_server.py +++ b/tests/ci/docker_server.py @@ -70,7 +70,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "--tag-type", type=str, - choices=("head", "release", "latest-release"), + choices=("head", "release", "release-latest"), default="head", help="defines required tags for resulting docker image. 
" "head - for master image (tag: head) " diff --git a/tests/ci/functional_test_check.py b/tests/ci/functional_test_check.py index 52970404d2d..b7391eff01b 100644 --- a/tests/ci/functional_test_check.py +++ b/tests/ci/functional_test_check.py @@ -114,6 +114,9 @@ def get_run_command( if flaky_check: envs.append("-e NUM_TRIES=50") envs.append("-e MAX_RUN_TIME=2800") + else: + max_run_time = os.getenv("MAX_RUN_TIME", "0") + envs.append(f"-e MAX_RUN_TIME={max_run_time}") envs += [f"-e {e}" for e in additional_envs] diff --git a/tests/ci/integration_tests_runner.py b/tests/ci/integration_tests_runner.py index 84718462ab5..f5dbef4f6db 100755 --- a/tests/ci/integration_tests_runner.py +++ b/tests/ci/integration_tests_runner.py @@ -69,9 +69,9 @@ def get_changed_tests_to_run(pr_info, repo_path): return [] for fpath in changed_files: - if "tests/integration/test_" in fpath: + if re.search(r"tests/integration/test_.*/test.*\.py", fpath) is not None: logging.info("File %s changed and seems like integration test", fpath) - result.add(fpath.split("/")[2]) + result.add("/".join(fpath.split("/")[2:])) return filter_existing_tests(result, repo_path) diff --git a/tests/ci/libfuzzer_test_check.py b/tests/ci/libfuzzer_test_check.py index d9e33229932..8f19dd7d023 100644 --- a/tests/ci/libfuzzer_test_check.py +++ b/tests/ci/libfuzzer_test_check.py @@ -75,7 +75,7 @@ def get_run_command( f"--volume={result_path}:/test_output " "--security-opt seccomp=unconfined " # required to issue io_uring sys-calls f"--cap-add=SYS_PTRACE {env_str} {additional_options_str} {image} " - "python3 ./utils/runner.py" + "python3 /usr/share/clickhouse-test/fuzz/runner.py" ) diff --git a/tests/ci/report.py b/tests/ci/report.py index 15b1512896a..6779a6dae96 100644 --- a/tests/ci/report.py +++ b/tests/ci/report.py @@ -738,7 +738,7 @@ def create_test_html_report( if test_results: rows_part = [] num_fails = 0 - has_test_time = False + has_test_time = any(tr.time is not None for tr in test_results) has_log_urls = False # Display entires with logs at the top (they correspond to failed tests) @@ -770,12 +770,12 @@ def create_test_html_report( row.append(f'{test_result.status}') colspan += 1 - row.append("") - if test_result.time is not None: - has_test_time = True - row.append(str(test_result.time)) - row.append("") - colspan += 1 + if has_test_time: + if test_result.time is not None: + row.append(f"{test_result.time}") + else: + row.append("") + colspan += 1 if test_result.log_urls is not None: has_log_urls = True diff --git a/tests/ci/version_helper.py b/tests/ci/version_helper.py index 07a7a9601c0..b20b2bb25cf 100755 --- a/tests/ci/version_helper.py +++ b/tests/ci/version_helper.py @@ -85,6 +85,16 @@ class ClickHouseVersion: self._tweak = 1 return self + def bump_patch(self) -> "ClickHouseVersion": + self._revision += 1 + self._patch += 1 + self._tweak = 1 + return self + + def reset_tweak(self) -> "ClickHouseVersion": + self._tweak = 1 + return self + def major_update(self) -> "ClickHouseVersion": if self._git is not None: self._git.update() @@ -104,13 +114,6 @@ class ClickHouseVersion: self.major, self.minor, self.patch + 1, self.revision, self._git ) - def reset_tweak(self) -> "ClickHouseVersion": - if self._git is not None: - self._git.update() - return ClickHouseVersion( - self.major, self.minor, self.patch, self.revision, self._git, 1 - ) - @property def major(self) -> int: return self._major diff --git a/tests/config/config.d/transactions.xml b/tests/config/config.d/transactions.xml index 9948b1f1865..64e166b81b5 100644 --- 
a/tests/config/config.d/transactions.xml +++ b/tests/config/config.d/transactions.xml @@ -1,4 +1,4 @@ - + 42 @@ -18,4 +18,4 @@ 0.01 - + diff --git a/tests/fuzz/README.md b/tests/fuzz/README.md deleted file mode 100644 index 6b5b161b2d5..00000000000 --- a/tests/fuzz/README.md +++ /dev/null @@ -1,23 +0,0 @@ -The list of functions generated via the following query - -``` - clickhouse client -q "SELECT * FROM (SELECT DISTINCT concat('\"', name, '\"') as res FROM system.functions ORDER BY name UNION ALL SELECT concat('\"', a.name, b.name, '\"') as res FROM system.functions as a CROSS JOIN system.aggregate_function_combinators as b WHERE a.is_aggregate = 1) ORDER BY res" > functions.dict -``` - -The list of datatypes generated via the following query: - -``` - clickhouse client -q "SELECT DISTINCT concat('\"', name, '\"') as res FROM system.data_type_families ORDER BY name" > datatypes.dict -``` - -The list of keywords generated via the following query: - -``` - clickhouse client -q "SELECT DISTINCT concat('\"', keyword, '\"') as res FROM system.keywords ORDER BY keyword" > key_words.dict -``` - -Then merge all dictionaries into one (all.dict) - -``` - cat ./dictionaries/* | sort | uniq > all.dict -``` \ No newline at end of file diff --git a/tests/fuzz/all.dict b/tests/fuzz/all.dict index f08e319f0d4..30af3746fca 100644 --- a/tests/fuzz/all.dict +++ b/tests/fuzz/all.dict @@ -1,38 +1,991 @@ +"ADD COLUMN" +"ADD CONSTRAINT" +"ADD INDEX" +"ADD PROJECTION" +"ADD STATISTICS" +"ADD" +"ADMIN OPTION FOR" +"AFTER" +"ALGORITHM" +"ALIAS" +"ALL" +"ALLOWED_LATENESS" +"ALTER COLUMN" +"ALTER DATABASE" +"ALTER LIVE VIEW" +"ALTER POLICY" +"ALTER PROFILE" +"ALTER QUOTA" +"ALTER ROLE" +"ALTER ROW POLICY" +"ALTER SETTINGS PROFILE" +"ALTER TABLE" +"ALTER TEMPORARY TABLE" +"ALTER USER" +"ALTER" +"AND STDOUT" +"AND" +"ANTI" +"ANY" +"APPEND" +"APPLY DELETED MASK" +"APPLY" +"ARRAY JOIN" +"AS" +"ASC" +"ASCENDING" +"ASOF" +"ASSUME" +"AST" +"ASYNC" +"ATTACH PART" +"ATTACH PARTITION" +"ATTACH POLICY" +"ATTACH PROFILE" +"ATTACH QUOTA" +"ATTACH ROLE" +"ATTACH ROW POLICY" +"ATTACH SETTINGS PROFILE" +"ATTACH USER" +"ATTACH" +"AUTO_INCREMENT" +"AZURE" +"AggregateFunction" +"Array" +"BACKUP" +"BCRYPT_HASH" +"BCRYPT_PASSWORD" +"BEGIN TRANSACTION" +"BETWEEN" +"BIDIRECTIONAL" +"BIGINT SIGNED" +"BIGINT UNSIGNED" +"BIGINT" +"BINARY LARGE OBJECT" +"BINARY VARYING" +"BINARY" +"BIT" +"BIT_AND" +"BIT_ANDArgMax" +"BIT_ANDArgMin" +"BIT_ANDArray" +"BIT_ANDDistinct" +"BIT_ANDForEach" +"BIT_ANDIf" +"BIT_ANDMap" +"BIT_ANDMerge" +"BIT_ANDNull" +"BIT_ANDOrDefault" +"BIT_ANDOrNull" +"BIT_ANDResample" +"BIT_ANDSimpleState" +"BIT_ANDState" +"BIT_OR" +"BIT_ORArgMax" +"BIT_ORArgMin" +"BIT_ORArray" +"BIT_ORDistinct" +"BIT_ORForEach" +"BIT_ORIf" +"BIT_ORMap" +"BIT_ORMerge" +"BIT_ORNull" +"BIT_OROrDefault" +"BIT_OROrNull" +"BIT_ORResample" +"BIT_ORSimpleState" +"BIT_ORState" +"BIT_XOR" +"BIT_XORArgMax" +"BIT_XORArgMin" +"BIT_XORArray" +"BIT_XORDistinct" +"BIT_XORForEach" +"BIT_XORIf" +"BIT_XORMap" +"BIT_XORMerge" +"BIT_XORNull" +"BIT_XOROrDefault" +"BIT_XOROrNull" +"BIT_XORResample" +"BIT_XORSimpleState" +"BIT_XORState" +"BLAKE3" +"BLOB" +"BOTH" +"BY" +"BYTE" +"BYTEA" +"Bool" +"CASCADE" +"CASE" +"CAST" +"CHANGE" +"CHANGEABLE_IN_READONLY" +"CHANGED" +"CHAR LARGE OBJECT" +"CHAR VARYING" +"CHAR" +"CHARACTER LARGE OBJECT" +"CHARACTER VARYING" +"CHARACTER" +"CHARACTER_LENGTH" +"CHAR_LENGTH" +"CHECK ALL TABLES" +"CHECK TABLE" +"CHECK" +"CLEANUP" +"CLEAR COLUMN" +"CLEAR INDEX" +"CLEAR PROJECTION" +"CLEAR STATISTICS" +"CLOB" +"CLUSTER" +"CLUSTERS" +"CN" +"CODEC" 
+"COLLATE" +"COLUMN" +"COLUMNS" +"COMMENT COLUMN" +"COMMENT" +"COMMIT" +"COMPRESSION" +"CONST" +"CONSTRAINT" +"COVAR_POP" +"COVAR_POPArgMax" +"COVAR_POPArgMin" +"COVAR_POPArray" +"COVAR_POPDistinct" +"COVAR_POPForEach" +"COVAR_POPIf" +"COVAR_POPMap" +"COVAR_POPMerge" +"COVAR_POPNull" +"COVAR_POPOrDefault" +"COVAR_POPOrNull" +"COVAR_POPResample" +"COVAR_POPSimpleState" +"COVAR_POPState" +"COVAR_SAMP" +"COVAR_SAMPArgMax" +"COVAR_SAMPArgMin" +"COVAR_SAMPArray" +"COVAR_SAMPDistinct" +"COVAR_SAMPForEach" +"COVAR_SAMPIf" +"COVAR_SAMPMap" +"COVAR_SAMPMerge" +"COVAR_SAMPNull" +"COVAR_SAMPOrDefault" +"COVAR_SAMPOrNull" +"COVAR_SAMPResample" +"COVAR_SAMPSimpleState" +"COVAR_SAMPState" +"CRC32" +"CRC32IEEE" +"CRC64" +"CREATE POLICY" +"CREATE PROFILE" +"CREATE QUOTA" +"CREATE ROLE" +"CREATE ROW POLICY" +"CREATE SETTINGS PROFILE" +"CREATE TABLE" +"CREATE TEMPORARY TABLE" +"CREATE USER" +"CREATE" +"CROSS" +"CUBE" +"CURRENT GRANTS" +"CURRENT QUOTA" +"CURRENT ROLES" +"CURRENT ROW" +"CURRENT TRANSACTION" +"CURRENTUSER" +"CURRENT_USER" +"D" +"DATA INNER UUID" +"DATA" +"DATABASE" +"DATABASES" +"DATE" +"DATEADD" +"DATEDIFF" +"DATESUB" +"DATE_ADD" +"DATE_DIFF" +"DATE_FORMAT" +"DATE_SUB" +"DATE_TRUNC" +"DAY" +"DAYOFMONTH" +"DAYOFWEEK" +"DAYOFYEAR" +"DAYS" +"DD" +"DEC" +"DEDUPLICATE" +"DEFAULT DATABASE" +"DEFAULT ROLE" +"DEFAULT" +"DEFINER" +"DELETE WHERE" +"DELETE" +"DEPENDS ON" +"DESC" +"DESCENDING" +"DESCRIBE" +"DETACH PART" +"DETACH PARTITION" +"DETACH" +"DICTIONARIES" +"DICTIONARY" +"DISK" +"DISTINCT ON" +"DISTINCT" +"DIV" +"DOUBLE PRECISION" +"DOUBLE" +"DOUBLE_SHA1_HASH" +"DOUBLE_SHA1_PASSWORD" +"DROP COLUMN" +"DROP CONSTRAINT" +"DROP DEFAULT" +"DROP DETACHED PART" +"DROP DETACHED PARTITION" +"DROP INDEX" +"DROP PART" +"DROP PARTITION" +"DROP PROJECTION" +"DROP STATISTICS" +"DROP TABLE" +"DROP TEMPORARY TABLE" +"DROP" +"Date" +"Date32" +"DateTime" +"DateTime32" +"DateTime64" +"Decimal" +"Decimal128" +"Decimal256" +"Decimal32" +"Decimal64" +"Dynamic" +"ELSE" +"EMPTY AS" +"EMPTY" +"ENABLED ROLES" +"END" +"ENFORCED" +"ENGINE" +"ENUM" +"EPHEMERAL SEQUENTIAL" +"EPHEMERAL" +"ESTIMATE" +"EVENT" +"EVENTS" +"EVERY" +"EXCEPT DATABASE" +"EXCEPT DATABASES" +"EXCEPT TABLE" +"EXCEPT TABLES" +"EXCEPT" +"EXCHANGE DICTIONARIES" +"EXCHANGE TABLES" +"EXISTS" +"EXPLAIN" +"EXPRESSION" +"EXTENDED" +"EXTERNAL DDL FROM" +"EXTRACT" +"Enum" +"Enum16" +"Enum8" +"FALSE" +"FETCH PART" +"FETCH PARTITION" +"FETCH" +"FIELDS" +"FILE" +"FILESYSTEM CACHE" +"FILESYSTEM CACHES" +"FILTER" +"FINAL" +"FIRST" +"FIXED" +"FLOAT" +"FOLLOWING" +"FOR" +"FOREIGN KEY" +"FOREIGN" +"FORGET PARTITION" +"FORMAT" +"FORMAT_BYTES" +"FQDN" +"FREEZE" +"FROM INFILE" +"FROM SHARD" +"FROM" +"FROM_BASE64" +"FROM_DAYS" +"FROM_UNIXTIME" +"FULL" +"FULLTEXT" +"FUNCTION" +"FixedString" +"Float32" +"Float64" +"ForEach" +"GEOMETRY" +"GLOBAL IN" +"GLOBAL NOT IN" +"GLOBAL" +"GRANT OPTION FOR" +"GRANT" +"GRANTEES" +"GRANULARITY" +"GROUP BY" +"GROUPING SETS" +"GROUPS" +"H" +"HASH" +"HAVING" +"HDFS" +"HH" +"HIERARCHICAL" +"HOST" +"HOUR" +"HOURS" +"HTTP" +"ID" +"IDENTIFIED" +"IF EMPTY" +"IF EXISTS" +"IF NOT EXISTS" +"IGNORE NULLS" +"ILIKE" +"IN PARTITION" +"IN" +"INDEX" +"INDEXES" +"INDICES" +"INET4" +"INET6" +"INET6_ATON" +"INET6_NTOA" +"INET_ATON" +"INET_NTOA" +"INHERIT" +"INJECTIVE" +"INNER" +"INSERT INTO" +"INT SIGNED" +"INT UNSIGNED" +"INT" +"INT1 SIGNED" +"INT1 UNSIGNED" +"INT1" +"INTEGER SIGNED" +"INTEGER UNSIGNED" +"INTEGER" +"INTERPOLATE" +"INTERSECT" +"INTERVAL" +"INTO OUTFILE" +"INVISIBLE" +"INVOKER" +"IP" +"IPv4" +"IPv4CIDRToRange" +"IPv4NumToString" 
+"IPv4NumToStringClassC" +"IPv4StringToNum" +"IPv4StringToNumOrDefault" +"IPv4StringToNumOrNull" +"IPv4ToIPv6" +"IPv6" +"IPv6CIDRToRange" +"IPv6NumToString" +"IPv6StringToNum" +"IPv6StringToNumOrDefault" +"IPv6StringToNumOrNull" +"IS NOT DISTINCT FROM" +"IS NOT NULL" +"IS NULL" +"IS_OBJECT_ID" +"Int128" +"Int16" +"Int256" +"Int32" +"Int64" +"Int8" +"IntervalDay" +"IntervalHour" +"IntervalMicrosecond" +"IntervalMillisecond" +"IntervalMinute" +"IntervalMonth" +"IntervalNanosecond" +"IntervalQuarter" +"IntervalSecond" +"IntervalWeek" +"IntervalYear" +"JOIN" +"JSON" +"JSONArrayLength" +"JSONExtract" +"JSONExtractArrayRaw" +"JSONExtractBool" +"JSONExtractFloat" +"JSONExtractInt" +"JSONExtractKeys" +"JSONExtractKeysAndValues" +"JSONExtractKeysAndValuesRaw" +"JSONExtractRaw" +"JSONExtractString" +"JSONExtractUInt" +"JSONHas" +"JSONKey" +"JSONLength" +"JSONMergePatch" +"JSONType" +"JSON_ARRAY_LENGTH" +"JSON_EXISTS" +"JSON_QUERY" +"JSON_VALUE" +"JWT" +"KERBEROS" +"KEY BY" +"KEY" +"KEYED BY" +"KEYS" +"KILL" +"KIND" +"L1Distance" +"L1Norm" +"L1Normalize" +"L2Distance" +"L2Norm" +"L2Normalize" +"L2SquaredDistance" +"L2SquaredNorm" +"LARGE OBJECT" +"LAST" +"LAST_DAY" +"LAYOUT" +"LDAP" +"LEADING" +"LEFT ARRAY JOIN" +"LEFT" +"LESS THAN" +"LEVEL" +"LIFETIME" +"LIGHTWEIGHT" +"LIKE" +"LIMIT" +"LINEAR" +"LIST" +"LIVE" +"LOCAL" +"LONGBLOB" +"LONGTEXT" +"LTRIM" +"LineString" +"LinfDistance" +"LinfNorm" +"LinfNormalize" +"LowCardinality" +"LpDistance" +"LpNorm" +"LpNormalize" +"M" +"MACNumToString" +"MACStringToNum" +"MACStringToOUI" +"MAP_FROM_ARRAYS" +"MATCH" +"MATERIALIZE COLUMN" +"MATERIALIZE INDEX" +"MATERIALIZE PROJECTION" +"MATERIALIZE STATISTICS" +"MATERIALIZE TTL" +"MATERIALIZE" +"MATERIALIZED" +"MAX" +"MCS" +"MD4" +"MD5" +"MEDIUMBLOB" +"MEDIUMINT SIGNED" +"MEDIUMINT UNSIGNED" +"MEDIUMINT" +"MEDIUMTEXT" +"MEMORY" +"MERGES" +"METRICS INNER UUID" +"METRICS" +"MI" +"MICROSECOND" +"MICROSECONDS" +"MILLISECOND" +"MILLISECONDS" +"MIN" +"MINUTE" +"MINUTES" +"MM" +"MOD" +"MODIFY COLUMN" +"MODIFY COMMENT" +"MODIFY DEFINER" +"MODIFY ORDER BY" +"MODIFY QUERY" +"MODIFY REFRESH" +"MODIFY SAMPLE BY" +"MODIFY SETTING" +"MODIFY SQL SECURITY" +"MODIFY STATISTICS" +"MODIFY TTL" +"MODIFY" +"MONTH" +"MONTHS" +"MOVE PART" +"MOVE PARTITION" +"MOVE" +"MS" +"MUTATION" +"Map" +"Merge" +"MultiLineString" +"MultiPolygon" +"N" +"NAME" +"NAMED COLLECTION" +"NANOSECOND" +"NANOSECONDS" +"NATIONAL CHAR VARYING" +"NATIONAL CHAR" +"NATIONAL CHARACTER LARGE OBJECT" +"NATIONAL CHARACTER VARYING" +"NATIONAL CHARACTER" +"NCHAR LARGE OBJECT" +"NCHAR VARYING" +"NCHAR" +"NEXT" +"NO ACTION" +"NO DELAY" +"NO LIMITS" +"NONE" +"NOT BETWEEN" +"NOT IDENTIFIED" +"NOT ILIKE" +"NOT IN" +"NOT KEYED" +"NOT LIKE" +"NOT OVERRIDABLE" +"NOT" +"NO_PASSWORD" +"NS" +"NULL" +"NULLS" +"NUMERIC" +"NVARCHAR" +"Nested" +"Nothing" +"Null" +"Nullable" +"OCTET_LENGTH" +"OFFSET" +"ON DELETE" +"ON UPDATE" +"ON VOLUME" +"ON" +"ONLY" +"OPTIMIZE TABLE" +"OR REPLACE" +"OR" +"ORDER BY" +"OUTER" +"OVER" +"OVERRIDABLE" +"Object" +"PART" +"PARTIAL" +"PARTITION BY" +"PARTITION" +"PARTITIONS" +"PART_MOVE_TO_SHARD" +"PASTE" +"PERIODIC REFRESH" +"PERMANENTLY" +"PERMISSIVE" +"PERSISTENT SEQUENTIAL" +"PERSISTENT" +"PIPELINE" +"PLAINTEXT_PASSWORD" +"PLAN" +"POPULATE" +"PRECEDING" +"PRECISION" +"PREWHERE" +"PRIMARY KEY" +"PRIMARY" +"PROFILE" +"PROJECTION" +"PULL" +"Point" +"Polygon" +"Protobuf" +"Q" +"QQ" +"QUALIFY" +"QUARTER" +"QUARTERS" +"QUERY TREE" +"QUERY" +"QUOTA" +"RANDOMIZE FOR" +"RANDOMIZED" +"RANGE" +"READONLY" +"REAL" +"REALM" +"RECOMPRESS" +"RECURSIVE" +"REFERENCES" 
+"REFRESH" +"REGEXP" +"REGEXP_EXTRACT" +"REGEXP_MATCHES" +"REGEXP_REPLACE" +"REMOVE SAMPLE BY" +"REMOVE TTL" +"REMOVE" +"RENAME COLUMN" +"RENAME DATABASE" +"RENAME DICTIONARY" +"RENAME TABLE" +"RENAME TO" +"RENAME" +"REPLACE PARTITION" +"REPLACE" +"RESET SETTING" +"RESPECT NULLS" +"RESTORE" +"RESTRICT" +"RESTRICTIVE" +"RESUME" +"REVOKE" +"RIGHT" +"ROLLBACK" +"ROLLUP" +"ROW" +"ROWS" +"RTRIM" +"Resample" +"Ring" +"S" +"S3" +"SALT" +"SAMPLE BY" +"SAMPLE" +"SAN" +"SCHEMA" +"SCHEME" +"SECOND" +"SECONDS" +"SELECT" +"SEMI" +"SERVER" +"SET DEFAULT ROLE" +"SET DEFAULT" +"SET FAKE TIME" +"SET NULL" +"SET ROLE DEFAULT" +"SET ROLE" +"SET TRANSACTION SNAPSHOT" +"SET" +"SETTINGS" +"SHA1" +"SHA224" +"SHA256" +"SHA256_HASH" +"SHA256_PASSWORD" +"SHA384" +"SHA512" +"SHA512_256" +"SHOW ACCESS" +"SHOW CREATE" +"SHOW ENGINES" +"SHOW FUNCTIONS" +"SHOW GRANTS" +"SHOW PRIVILEGES" +"SHOW PROCESSLIST" +"SHOW SETTING" +"SHOW" +"SIGNED" +"SIMPLE" +"SINGLE" +"SMALLINT SIGNED" +"SMALLINT UNSIGNED" +"SMALLINT" +"SOURCE" +"SPATIAL" +"SQL SECURITY" +"SQL_TSI_DAY" +"SQL_TSI_HOUR" +"SQL_TSI_MICROSECOND" +"SQL_TSI_MILLISECOND" +"SQL_TSI_MINUTE" +"SQL_TSI_MONTH" +"SQL_TSI_NANOSECOND" +"SQL_TSI_QUARTER" +"SQL_TSI_SECOND" +"SQL_TSI_WEEK" +"SQL_TSI_YEAR" +"SS" +"SSH_KEY" +"SSL_CERTIFICATE" +"START TRANSACTION" +"STATISTICS" +"STD" +"STDArgMax" +"STDArgMin" +"STDArray" +"STDDEV_POP" +"STDDEV_POPArgMax" +"STDDEV_POPArgMin" +"STDDEV_POPArray" +"STDDEV_POPDistinct" +"STDDEV_POPForEach" +"STDDEV_POPIf" +"STDDEV_POPMap" +"STDDEV_POPMerge" +"STDDEV_POPNull" +"STDDEV_POPOrDefault" +"STDDEV_POPOrNull" +"STDDEV_POPResample" +"STDDEV_POPSimpleState" +"STDDEV_POPState" +"STDDEV_SAMP" +"STDDEV_SAMPArgMax" +"STDDEV_SAMPArgMin" +"STDDEV_SAMPArray" +"STDDEV_SAMPDistinct" +"STDDEV_SAMPForEach" +"STDDEV_SAMPIf" +"STDDEV_SAMPMap" +"STDDEV_SAMPMerge" +"STDDEV_SAMPNull" +"STDDEV_SAMPOrDefault" +"STDDEV_SAMPOrNull" +"STDDEV_SAMPResample" +"STDDEV_SAMPSimpleState" +"STDDEV_SAMPState" +"STDDistinct" +"STDForEach" +"STDIf" +"STDMap" +"STDMerge" +"STDNull" +"STDOrDefault" +"STDOrNull" +"STDResample" +"STDSimpleState" +"STDState" +"STEP" +"STORAGE" +"STRICT" +"STRICTLY_ASCENDING" +"SUBPARTITION BY" +"SUBPARTITION" +"SUBPARTITIONS" +"SUBSTRING" +"SUBSTRING_INDEX" +"SUSPEND" +"SVG" +"SYNC" +"SYNTAX" +"SYSTEM" +"SimpleAggregateFunction" +"State" +"String" +"TABLE OVERRIDE" +"TABLE" +"TABLES" +"TAGS INNER UUID" +"TAGS" +"TEMPORARY TABLE" +"TEMPORARY" +"TEST" +"TEXT" +"THEN" +"TIME" +"TIMESTAMP" +"TIMESTAMPADD" +"TIMESTAMPDIFF" +"TIMESTAMPSUB" +"TIMESTAMP_ADD" +"TIMESTAMP_DIFF" +"TIMESTAMP_SUB" +"TINYBLOB" +"TINYINT SIGNED" +"TINYINT UNSIGNED" +"TINYINT" +"TINYTEXT" +"TO DISK" +"TO INNER UUID" +"TO SHARD" +"TO TABLE" +"TO VOLUME" +"TO" +"TOP" +"TOTALS" +"TO_BASE64" +"TO_DAYS" +"TO_UNIXTIME" +"TRACKING ONLY" +"TRAILING" +"TRANSACTION" +"TRIGGER" +"TRIM" +"TRUE" +"TRUNCATE" +"TTL" +"TYPE" +"TYPEOF" +"Tuple" +"UInt128" +"UInt16" +"UInt256" +"UInt32" +"UInt64" +"UInt8" +"ULIDStringToDateTime" +"UNBOUNDED" +"UNDROP" +"UNFREEZE" +"UNION" +"UNIQUE" +"UNSET FAKE TIME" +"UNSIGNED" +"UPDATE" +"URL" +"URLHash" +"URLHierarchy" +"URLPathHierarchy" +"USE" +"USING" +"UTCTimestamp" +"UTC_timestamp" +"UUID" +"UUIDNumToString" +"UUIDStringToNum" +"UUIDToNum" +"UUIDv7ToDateTime" +"VALID UNTIL" +"VALUES" +"VARBINARY" +"VARCHAR" +"VARCHAR2" +"VARYING" +"VAR_POP" +"VAR_POPArgMax" +"VAR_POPArgMin" +"VAR_POPArray" +"VAR_POPDistinct" +"VAR_POPForEach" +"VAR_POPIf" +"VAR_POPMap" +"VAR_POPMerge" +"VAR_POPNull" +"VAR_POPOrDefault" +"VAR_POPOrNull" +"VAR_POPResample" +"VAR_POPSimpleState" 
+"VAR_POPState" +"VAR_SAMP" +"VAR_SAMPArgMax" +"VAR_SAMPArgMin" +"VAR_SAMPArray" +"VAR_SAMPDistinct" +"VAR_SAMPForEach" +"VAR_SAMPIf" +"VAR_SAMPMap" +"VAR_SAMPMerge" +"VAR_SAMPNull" +"VAR_SAMPOrDefault" +"VAR_SAMPOrNull" +"VAR_SAMPResample" +"VAR_SAMPSimpleState" +"VAR_SAMPState" +"VIEW" +"VISIBLE" +"Variant" +"WATCH" +"WATERMARK" +"WEEK" +"WEEKS" +"WHEN" +"WHERE" +"WINDOW" +"WITH ADMIN OPTION" +"WITH CHECK" +"WITH FILL" +"WITH GRANT OPTION" +"WITH NAME" +"WITH REPLACE OPTION" +"WITH TIES" +"WITH" +"WITH_ITEMINDEX" +"WK" +"WRITABLE" +"WW" +"YEAR" +"YEARS" +"YY" +"YYYY" +"YYYYMMDDToDate" +"YYYYMMDDToDate32" +"YYYYMMDDhhmmssToDateTime" +"YYYYMMDDhhmmssToDateTime64" +"ZKPATH" +"_CAST" +"__actionName" +"__bitBoolMaskAnd" +"__bitBoolMaskOr" +"__bitSwapLastTwo" +"__bitWrapperFunc" +"__getScalar" +"__scalarSubqueryResult" "abs" "accurateCast" "accurateCastOrDefault" "accurateCastOrNull" "acos" "acosh" -"ADD" -"ADD COLUMN" -"ADD CONSTRAINT" "addDate" "addDays" "addHours" -"ADD INDEX" "addInterval" "addMicroseconds" "addMilliseconds" "addMinutes" "addMonths" "addNanoseconds" -"ADD PROJECTION" "addQuarters" -"addressToLine" -"addressToLineWithInlines" -"addressToSymbol" "addSeconds" -"ADD STATISTIC" "addTupleOfIntervals" "addWeeks" "addYears" -"ADMIN OPTION FOR" +"addressToLine" +"addressToLineWithInlines" +"addressToSymbol" "aes_decrypt_mysql" "aes_encrypt_mysql" -"AFTER" "age" -"AggregateFunction" "aggThrow" "aggThrowArgMax" "aggThrowArgMin" @@ -48,24 +1001,7 @@ "aggThrowResample" "aggThrowSimpleState" "aggThrowState" -"ALGORITHM" -"ALIAS" -"ALL" -"ALLOWED_LATENESS" "alphaTokens" -"ALTER" -"ALTER COLUMN" -"ALTER DATABASE" -"ALTER LIVE VIEW" -"ALTER POLICY" -"ALTER PROFILE" -"ALTER QUOTA" -"ALTER ROLE" -"ALTER ROW POLICY" -"ALTER SETTINGS PROFILE" -"ALTER TABLE" -"ALTER TEMPORARY TABLE" -"ALTER USER" "analysisOfVariance" "analysisOfVarianceArgMax" "analysisOfVarianceArgMin" @@ -82,8 +1018,6 @@ "analysisOfVarianceSimpleState" "analysisOfVarianceState" "and" -"AND" -"AND STDOUT" "anova" "anovaArgMax" "anovaArgMin" @@ -99,9 +1033,7 @@ "anovaResample" "anovaSimpleState" "anovaState" -"ANTI" "any" -"ANY" "anyArgMax" "anyArgMin" "anyArray" @@ -136,6 +1068,8 @@ "anyLastOrDefault" "anyLastOrNull" "anyLastResample" +"anyLastSimpleState" +"anyLastState" "anyLast_respect_nulls" "anyLast_respect_nullsArgMax" "anyLast_respect_nullsArgMin" @@ -151,14 +1085,14 @@ "anyLast_respect_nullsResample" "anyLast_respect_nullsSimpleState" "anyLast_respect_nullsState" -"anyLastSimpleState" -"anyLastState" "anyMap" "anyMerge" "anyNull" "anyOrDefault" "anyOrNull" "anyResample" +"anySimpleState" +"anyState" "any_respect_nulls" "any_respect_nullsArgMax" "any_respect_nullsArgMin" @@ -174,8 +1108,6 @@ "any_respect_nullsResample" "any_respect_nullsSimpleState" "any_respect_nullsState" -"anySimpleState" -"anyState" "any_value" "any_valueArgMax" "any_valueArgMin" @@ -189,6 +1121,8 @@ "any_valueOrDefault" "any_valueOrNull" "any_valueResample" +"any_valueSimpleState" +"any_valueState" "any_value_respect_nulls" "any_value_respect_nullsArgMax" "any_value_respect_nullsArgMin" @@ -204,12 +1138,7 @@ "any_value_respect_nullsResample" "any_value_respect_nullsSimpleState" "any_value_respect_nullsState" -"any_valueSimpleState" -"any_valueState" -"APPEND" "appendTrailingCharIfAbsent" -"APPLY" -"APPLY DELETED MASK" "approx_top_count" "approx_top_countArgMax" "approx_top_countArgMin" @@ -286,42 +1215,11 @@ "argMinSimpleState" "argMinState" "array" -"Array" -"array_agg" -"array_aggArgMax" -"array_aggArgMin" -"array_aggArray" -"array_aggDistinct" 
-"array_aggForEach" -"array_aggIf" -"array_aggMap" -"array_aggMerge" -"array_aggNull" -"array_aggOrDefault" -"array_aggOrNull" -"array_aggResample" -"array_aggSimpleState" -"array_aggState" -"arrayAll" "arrayAUC" +"arrayAll" "arrayAvg" "arrayCompact" "arrayConcat" -"array_concat_agg" -"array_concat_aggArgMax" -"array_concat_aggArgMin" -"array_concat_aggArray" -"array_concat_aggDistinct" -"array_concat_aggForEach" -"array_concat_aggIf" -"array_concat_aggMap" -"array_concat_aggMerge" -"array_concat_aggNull" -"array_concat_aggOrDefault" -"array_concat_aggOrNull" -"array_concat_aggResample" -"array_concat_aggSimpleState" -"array_concat_aggState" "arrayCount" "arrayCumSum" "arrayCumSumNonNegative" @@ -345,7 +1243,6 @@ "arrayIntersect" "arrayJaccardIndex" "arrayJoin" -"ARRAY JOIN" "arrayLast" "arrayLastIndex" "arrayLastOrNull" @@ -382,31 +1279,43 @@ "arrayUniq" "arrayWithConstant" "arrayZip" -"AS" -"ASC" -"ASCENDING" +"array_agg" +"array_aggArgMax" +"array_aggArgMin" +"array_aggArray" +"array_aggDistinct" +"array_aggForEach" +"array_aggIf" +"array_aggMap" +"array_aggMerge" +"array_aggNull" +"array_aggOrDefault" +"array_aggOrNull" +"array_aggResample" +"array_aggSimpleState" +"array_aggState" +"array_concat_agg" +"array_concat_aggArgMax" +"array_concat_aggArgMin" +"array_concat_aggArray" +"array_concat_aggDistinct" +"array_concat_aggForEach" +"array_concat_aggIf" +"array_concat_aggMap" +"array_concat_aggMerge" +"array_concat_aggNull" +"array_concat_aggOrDefault" +"array_concat_aggOrNull" +"array_concat_aggResample" +"array_concat_aggSimpleState" +"array_concat_aggState" "ascii" "asin" "asinh" -"ASOF" -"ASSUME" "assumeNotNull" -"AST" -"ASYNC" "atan" "atan2" "atanh" -"ATTACH" -"ATTACH PART" -"ATTACH PARTITION" -"ATTACH POLICY" -"ATTACH PROFILE" -"ATTACH QUOTA" -"ATTACH ROLE" -"ATTACH ROW POLICY" -"ATTACH SETTINGS PROFILE" -"ATTACH USER" -"AUTO_INCREMENT" "avg" "avgArgMax" "avgArgMin" @@ -437,49 +1346,32 @@ "avgWeightedResample" "avgWeightedSimpleState" "avgWeightedState" -"AZURE" -"BACKUP" "bagexpansion" "bar" "base58Decode" "base58Encode" "base64Decode" "base64Encode" +"base64URLDecode" +"base64URLEncode" "base_backup" "basename" -"BCRYPT_HASH" -"BCRYPT_PASSWORD" -"BEGIN TRANSACTION" -"BETWEEN" -"BIDIRECTIONAL" -"BIGINT" -"BIGINT SIGNED" -"BIGINT UNSIGNED" "bin" -"BINARY" -"BINARY LARGE OBJECT" -"BINARY VARYING" -"BIT" "bitAnd" -"BIT_AND" -"BIT_ANDArgMax" -"BIT_ANDArgMin" -"BIT_ANDArray" -"BIT_ANDDistinct" -"BIT_ANDForEach" -"BIT_ANDIf" -"BIT_ANDMap" -"BIT_ANDMerge" -"BIT_ANDNull" -"BIT_ANDOrDefault" -"BIT_ANDOrNull" -"BIT_ANDResample" -"BIT_ANDSimpleState" -"BIT_ANDState" -"__bitBoolMaskAnd" -"__bitBoolMaskOr" "bitCount" "bitHammingDistance" +"bitNot" +"bitOr" +"bitPositionsToArray" +"bitRotateLeft" +"bitRotateRight" +"bitShiftLeft" +"bitShiftRight" +"bitSlice" +"bitTest" +"bitTestAll" +"bitTestAny" +"bitXor" "bitmapAnd" "bitmapAndCardinality" "bitmapAndnot" @@ -501,59 +1393,11 @@ "bitmapXorCardinality" "bitmaskToArray" "bitmaskToList" -"bitNot" -"bitOr" -"BIT_OR" -"BIT_ORArgMax" -"BIT_ORArgMin" -"BIT_ORArray" -"BIT_ORDistinct" -"BIT_ORForEach" -"BIT_ORIf" -"BIT_ORMap" -"BIT_ORMerge" -"BIT_ORNull" -"BIT_OROrDefault" -"BIT_OROrNull" -"BIT_ORResample" -"BIT_ORSimpleState" -"BIT_ORState" -"bitPositionsToArray" -"bitRotateLeft" -"bitRotateRight" -"bitShiftLeft" -"bitShiftRight" -"bitSlice" -"__bitSwapLastTwo" -"bitTest" -"bitTestAll" -"bitTestAny" -"__bitWrapperFunc" -"bitXor" -"BIT_XOR" -"BIT_XORArgMax" -"BIT_XORArgMin" -"BIT_XORArray" -"BIT_XORDistinct" -"BIT_XORForEach" -"BIT_XORIf" -"BIT_XORMap" 
-"BIT_XORMerge" -"BIT_XORNull" -"BIT_XOROrDefault" -"BIT_XOROrNull" -"BIT_XORResample" -"BIT_XORSimpleState" -"BIT_XORState" -"BLAKE3" -"BLOB" "blockNumber" "blockSerializedSize" "blockSize" "bool" -"Bool" "boolean" -"BOTH" "boundingRatio" "boundingRatioArgMax" "boundingRatioArgMin" @@ -570,21 +1414,14 @@ "boundingRatioSimpleState" "boundingRatioState" "buildId" -"BY" -"BYTE" -"BYTEA" "byteHammingDistance" "byteSize" "byteSlice" "byteSwap" -"CASCADE" -"CASE" "caseWithExpr" "caseWithExpression" "caseWithoutExpr" "caseWithoutExpression" -"_CAST" -"CAST" "catboostEvaluate" "categoricalInformationValue" "categoricalInformationValueArgMax" @@ -604,50 +1441,24 @@ "cbrt" "ceil" "ceiling" -"CHANGE" -"CHANGEABLE_IN_READONLY" -"CHANGED" +"changeDay" +"changeHour" +"changeMinute" +"changeMonth" +"changeSecond" +"changeYear" "char" -"CHAR" -"CHARACTER" -"CHARACTER LARGE OBJECT" -"CHARACTER_LENGTH" -"CHARACTER VARYING" -"CHAR LARGE OBJECT" -"CHAR_LENGTH" -"CHAR VARYING" -"CHECK" -"CHECK ALL TABLES" -"CHECK TABLE" "cityHash64" -"CLEANUP" -"CLEAR COLUMN" -"CLEAR INDEX" -"CLEAR PROJECTION" -"CLEAR STATISTIC" -"CLOB" -"CLUSTER" +"clamp" "cluster_host_ids" -"CLUSTERS" -"CN" "coalesce" -"CODEC" -"COLLATE" -"COLUMN" -"COLUMNS" -"COMMENT" -"COMMENT COLUMN" -"COMMIT" -"COMPRESSION" "concat" "concatAssumeInjective" "concatWithSeparator" "concatWithSeparatorAssumeInjective" "concat_ws" -"connection_id" "connectionId" -"CONST" -"CONSTRAINT" +"connection_id" "contingency" "contingencyArgMax" "contingencyArgMin" @@ -735,21 +1546,13 @@ "countSubstringsCaseInsensitive" "countSubstringsCaseInsensitiveUTF8" "covarPop" -"COVAR_POP" "covarPopArgMax" -"COVAR_POPArgMax" "covarPopArgMin" -"COVAR_POPArgMin" "covarPopArray" -"COVAR_POPArray" "covarPopDistinct" -"COVAR_POPDistinct" "covarPopForEach" -"COVAR_POPForEach" "covarPopIf" -"COVAR_POPIf" "covarPopMap" -"COVAR_POPMap" "covarPopMatrix" "covarPopMatrixArgMax" "covarPopMatrixArgMin" @@ -766,17 +1569,11 @@ "covarPopMatrixSimpleState" "covarPopMatrixState" "covarPopMerge" -"COVAR_POPMerge" "covarPopNull" -"COVAR_POPNull" "covarPopOrDefault" -"COVAR_POPOrDefault" "covarPopOrNull" -"COVAR_POPOrNull" "covarPopResample" -"COVAR_POPResample" "covarPopSimpleState" -"COVAR_POPSimpleState" "covarPopStable" "covarPopStableArgMax" "covarPopStableArgMin" @@ -793,23 +1590,14 @@ "covarPopStableSimpleState" "covarPopStableState" "covarPopState" -"COVAR_POPState" "covarSamp" -"COVAR_SAMP" "covarSampArgMax" -"COVAR_SAMPArgMax" "covarSampArgMin" -"COVAR_SAMPArgMin" "covarSampArray" -"COVAR_SAMPArray" "covarSampDistinct" -"COVAR_SAMPDistinct" "covarSampForEach" -"COVAR_SAMPForEach" "covarSampIf" -"COVAR_SAMPIf" "covarSampMap" -"COVAR_SAMPMap" "covarSampMatrix" "covarSampMatrixArgMax" "covarSampMatrixArgMin" @@ -826,17 +1614,11 @@ "covarSampMatrixSimpleState" "covarSampMatrixState" "covarSampMerge" -"COVAR_SAMPMerge" "covarSampNull" -"COVAR_SAMPNull" "covarSampOrDefault" -"COVAR_SAMPOrDefault" "covarSampOrNull" -"COVAR_SAMPOrNull" "covarSampResample" -"COVAR_SAMPResample" "covarSampSimpleState" -"COVAR_SAMPSimpleState" "covarSampStable" "covarSampStableArgMax" "covarSampStableArgMin" @@ -853,7 +1635,6 @@ "covarSampStableSimpleState" "covarSampStableState" "covarSampState" -"COVAR_SAMPState" "cramersV" "cramersVArgMax" "cramersVArgMin" @@ -884,38 +1665,17 @@ "cramersVResample" "cramersVSimpleState" "cramersVState" -"CRC32" -"CRC32IEEE" -"CRC64" -"CREATE" -"CREATE POLICY" -"CREATE PROFILE" -"CREATE QUOTA" -"CREATE ROLE" -"CREATE ROW POLICY" -"CREATE SETTINGS PROFILE" -"CREATE TABLE" -"CREATE 
TEMPORARY TABLE" -"CREATE USER" -"CROSS" -"CUBE" "curdate" -"current_database" "currentDatabase" -"current_date" -"CURRENT GRANTS" "currentProfiles" -"CURRENT QUOTA" "currentRoles" -"CURRENT ROLES" -"CURRENT ROW" -"current_schemas" "currentSchemas" -"current_timestamp" -"CURRENT TRANSACTION" "currentUser" -"CURRENT_USER" -"CURRENTUSER" +"current_database" +"current_date" +"current_schemas" +"current_timestamp" +"current_user" "cutFragment" "cutIPv6" "cutQueryString" @@ -930,59 +1690,25 @@ "cutToFirstSignificantSubdomainWithWWWRFC" "cutURLParameter" "cutWWW" -"D" "damerauLevenshteinDistance" -"DATABASE" -"DATABASES" -"Date" -"DATE" -"Date32" -"DATE_ADD" -"DATEADD" -"date_diff" "dateDiff" -"DATE_DIFF" -"DATEDIFF" -"DATE_FORMAT" "dateName" -"DATE_SUB" -"DATESUB" -"DateTime" -"DateTime32" -"DateTime64" "dateTime64ToSnowflake" +"dateTime64ToSnowflakeID" "dateTimeToSnowflake" +"dateTimeToSnowflakeID" "dateTrunc" -"DATE_TRUNC" -"DAY" -"DAYOFMONTH" -"DAYOFWEEK" -"DAYOFYEAR" -"DAYS" -"DD" -"DEC" -"Decimal" -"Decimal128" -"Decimal256" -"Decimal32" -"Decimal64" +"date_diff" "decodeHTMLComponent" "decodeURLComponent" "decodeURLFormComponent" "decodeXMLComponent" "decrypt" -"DEDUPLICATE" -"DEFAULT" -"DEFAULT DATABASE" "defaultProfiles" -"DEFAULT ROLE" "defaultRoles" "defaultValueOfArgumentType" "defaultValueOfTypeName" -"DEFINER" "degrees" -"DELETE" -"DELETE WHERE" "deltaSum" "deltaSumArgMax" "deltaSumArgMin" @@ -1014,6 +1740,21 @@ "deltaSumTimestampSimpleState" "deltaSumTimestampState" "demangle" +"denseRank" +"denseRankArgMax" +"denseRankArgMin" +"denseRankArray" +"denseRankDistinct" +"denseRankForEach" +"denseRankIf" +"denseRankMap" +"denseRankMerge" +"denseRankNull" +"denseRankOrDefault" +"denseRankOrNull" +"denseRankResample" +"denseRankSimpleState" +"denseRankState" "dense_rank" "dense_rankArgMax" "dense_rankArgMin" @@ -1029,13 +1770,6 @@ "dense_rankResample" "dense_rankSimpleState" "dense_rankState" -"DEPENDS ON" -"DESC" -"DESCENDING" -"DESCRIBE" -"DETACH" -"DETACH PART" -"DETACH PARTITION" "detectCharset" "detectLanguage" "detectLanguageMixed" @@ -1055,6 +1789,10 @@ "dictGetFloat64" "dictGetFloat64OrDefault" "dictGetHierarchy" +"dictGetIPv4" +"dictGetIPv4OrDefault" +"dictGetIPv6" +"dictGetIPv6OrDefault" "dictGetInt16" "dictGetInt16OrDefault" "dictGetInt32" @@ -1063,10 +1801,6 @@ "dictGetInt64OrDefault" "dictGetInt8" "dictGetInt8OrDefault" -"dictGetIPv4" -"dictGetIPv4OrDefault" -"dictGetIPv6" -"dictGetIPv6OrDefault" "dictGetOrDefault" "dictGetOrNull" "dictGetString" @@ -1082,19 +1816,13 @@ "dictGetUUID" "dictGetUUIDOrDefault" "dictHas" -"DICTIONARIES" -"DICTIONARY" "dictIsIn" -"DISK" "displayName" "distanceL1" "distanceL2" "distanceL2Squared" "distanceLinf" "distanceLp" -"DISTINCT" -"DISTINCT ON" -"DIV" "divide" "divideDecimal" "domain" @@ -1102,29 +1830,13 @@ "domainWithoutWWW" "domainWithoutWWWRFC" "dotProduct" -"DOUBLE" -"DOUBLE PRECISION" -"DOUBLE_SHA1_HASH" -"DOUBLE_SHA1_PASSWORD" -"DROP" -"DROP COLUMN" -"DROP CONSTRAINT" -"DROP DEFAULT" -"DROP DETACHED PART" -"DROP DETACHED PARTITION" -"DROP INDEX" -"DROP PART" -"DROP PARTITION" -"DROP PROJECTION" -"DROP STATISTIC" -"DROP TABLE" -"DROP TEMPORARY TABLE" "dumpColumnStructure" +"dynamicElement" +"dynamicType" "e" "editDistance" -"ELSE" +"editDistanceUTF8" "empty" -"EMPTY" "emptyArrayDate" "emptyArrayDateTime" "emptyArrayFloat32" @@ -1139,19 +1851,14 @@ "emptyArrayUInt32" "emptyArrayUInt64" "emptyArrayUInt8" -"EMPTY AS" "enabledProfiles" "enabledRoles" -"ENABLED ROLES" "encodeURLComponent" "encodeURLFormComponent" "encodeXMLComponent" 
"encrypt" -"END" "endsWith" "endsWithUTF8" -"ENFORCED" -"ENGINE" "entropy" "entropyArgMax" "entropyArgMin" @@ -1167,33 +1874,14 @@ "entropyResample" "entropySimpleState" "entropyState" -"Enum" -"ENUM" -"Enum16" -"Enum8" -"EPHEMERAL" -"EPHEMERAL SEQUENTIAL" "equals" "erf" "erfc" "errorCodeToName" -"ESTIMATE" "evalMLMethod" -"EVENT" -"EVENTS" -"EVERY" -"EXCEPT" -"EXCEPT DATABASE" -"EXCEPT DATABASES" -"EXCEPT TABLE" -"EXCEPT TABLES" -"EXCHANGE DICTIONARIES" -"EXCHANGE TABLES" -"EXISTS" "exp" "exp10" "exp2" -"EXPLAIN" "exponentialMovingAverage" "exponentialMovingAverageArgMax" "exponentialMovingAverageArgMin" @@ -1269,11 +1957,7 @@ "exponentialTimeDecayedSumResample" "exponentialTimeDecayedSumSimpleState" "exponentialTimeDecayedSumState" -"EXPRESSION" -"EXTENDED" -"EXTERNAL DDL FROM" "extract" -"EXTRACT" "extractAll" "extractAllGroups" "extractAllGroupsHorizontal" @@ -1286,24 +1970,13 @@ "extractURLParameterNames" "extractURLParameters" "factorial" -"FALSE" "farmFingerprint64" "farmHash64" -"FETCH" -"FETCH PART" -"FETCH PARTITION" -"FIELDS" "file" -"FILE" "filesystemAvailable" -"FILESYSTEM CACHE" -"FILESYSTEM CACHES" "filesystemCapacity" "filesystemUnreserved" -"FILTER" -"FINAL" "finalizeAggregation" -"FIRST" "firstLine" "firstSignificantSubdomain" "firstSignificantSubdomainCustom" @@ -1322,6 +1995,8 @@ "first_valueOrDefault" "first_valueOrNull" "first_valueResample" +"first_valueSimpleState" +"first_valueState" "first_value_respect_nulls" "first_value_respect_nullsArgMax" "first_value_respect_nullsArgMin" @@ -1337,10 +2012,6 @@ "first_value_respect_nullsResample" "first_value_respect_nullsSimpleState" "first_value_respect_nullsState" -"first_valueSimpleState" -"first_valueState" -"FIXED" -"FixedString" "flameGraph" "flameGraphArgMax" "flameGraphArgMin" @@ -1358,19 +2029,8 @@ "flameGraphState" "flatten" "flattenTuple" -"FLOAT" -"Float32" -"Float64" "floor" -"FOLLOWING" -"FOR" -"ForEach" -"FOREIGN" -"FOREIGN KEY" -"FORGET PARTITION" "format" -"FORMAT" -"FORMAT_BYTES" "formatDateTime" "formatDateTimeInJodaSyntax" "formatQuery" @@ -1383,67 +2043,50 @@ "formatReadableTimeDelta" "formatRow" "formatRowNoNewline" -"FQDN" "fragment" -"FREEZE" -"FROM" -"FROM_BASE64" -"FROM_DAYS" "fromDaysSinceYearZero" "fromDaysSinceYearZero32" -"FROM INFILE" "fromModifiedJulianDay" "fromModifiedJulianDayOrNull" -"FROM SHARD" -"FROM_UNIXTIME" +"fromUTCTimestamp" "fromUnixTimestamp" "fromUnixTimestamp64Micro" "fromUnixTimestamp64Milli" "fromUnixTimestamp64Nano" "fromUnixTimestampInJodaSyntax" "from_utc_timestamp" -"fromUTCTimestamp" -"FULL" "fullHostName" -"FULLTEXT" -"FUNCTION" "fuzzBits" "gccMurmurHash" "gcd" "generateRandomStructure" +"generateSnowflakeID" "generateULID" "generateUUIDv4" +"generateUUIDv7" "geoDistance" +"geoToH3" +"geoToS2" "geohashDecode" "geohashEncode" "geohashesInBox" -"GEOMETRY" -"geoToH3" -"geoToS2" +"getClientHTTPHeader" "getMacro" "getOSKernelVersion" -"__getScalar" "getServerPort" "getSetting" "getSizeOfEnumType" "getSubcolumn" "getTypeSerializationStreams" -"GLOBAL" "globalIn" -"GLOBAL IN" "globalInIgnoreSet" "globalNotIn" -"GLOBAL NOT IN" "globalNotInIgnoreSet" "globalNotNullIn" "globalNotNullInIgnoreSet" "globalNullIn" "globalNullInIgnoreSet" "globalVariable" -"GRANT" -"GRANTEES" -"GRANT OPTION FOR" -"GRANULARITY" "greatCircleAngle" "greatCircleDistance" "greater" @@ -1584,6 +2227,36 @@ "groupBitAndResample" "groupBitAndSimpleState" "groupBitAndState" +"groupBitOr" +"groupBitOrArgMax" +"groupBitOrArgMin" +"groupBitOrArray" +"groupBitOrDistinct" +"groupBitOrForEach" +"groupBitOrIf" 
+"groupBitOrMap" +"groupBitOrMerge" +"groupBitOrNull" +"groupBitOrOrDefault" +"groupBitOrOrNull" +"groupBitOrResample" +"groupBitOrSimpleState" +"groupBitOrState" +"groupBitXor" +"groupBitXorArgMax" +"groupBitXorArgMin" +"groupBitXorArray" +"groupBitXorDistinct" +"groupBitXorForEach" +"groupBitXorIf" +"groupBitXorMap" +"groupBitXorMerge" +"groupBitXorNull" +"groupBitXorOrDefault" +"groupBitXorOrNull" +"groupBitXorResample" +"groupBitXorSimpleState" +"groupBitXorState" "groupBitmap" "groupBitmapAnd" "groupBitmapAndArgMax" @@ -1643,39 +2316,21 @@ "groupBitmapXorResample" "groupBitmapXorSimpleState" "groupBitmapXorState" -"groupBitOr" -"groupBitOrArgMax" -"groupBitOrArgMin" -"groupBitOrArray" -"groupBitOrDistinct" -"groupBitOrForEach" -"groupBitOrIf" -"groupBitOrMap" -"groupBitOrMerge" -"groupBitOrNull" -"groupBitOrOrDefault" -"groupBitOrOrNull" -"groupBitOrResample" -"groupBitOrSimpleState" -"groupBitOrState" -"groupBitXor" -"groupBitXorArgMax" -"groupBitXorArgMin" -"groupBitXorArray" -"groupBitXorDistinct" -"groupBitXorForEach" -"groupBitXorIf" -"groupBitXorMap" -"groupBitXorMerge" -"groupBitXorNull" -"groupBitXorOrDefault" -"groupBitXorOrNull" -"groupBitXorResample" -"groupBitXorSimpleState" -"groupBitXorState" -"GROUP BY" -"GROUPING SETS" -"GROUPS" +"groupConcat" +"groupConcatArgMax" +"groupConcatArgMin" +"groupConcatArray" +"groupConcatDistinct" +"groupConcatForEach" +"groupConcatIf" +"groupConcatMap" +"groupConcatMerge" +"groupConcatNull" +"groupConcatOrDefault" +"groupConcatOrNull" +"groupConcatResample" +"groupConcatSimpleState" +"groupConcatState" "groupUniqArray" "groupUniqArrayArgMax" "groupUniqArrayArgMin" @@ -1691,7 +2346,21 @@ "groupUniqArrayResample" "groupUniqArraySimpleState" "groupUniqArrayState" -"H" +"group_concat" +"group_concatArgMax" +"group_concatArgMin" +"group_concatArray" +"group_concatDistinct" +"group_concatForEach" +"group_concatIf" +"group_concatMap" +"group_concatMerge" +"group_concatNull" +"group_concatOrDefault" +"group_concatOrNull" +"group_concatResample" +"group_concatSimpleState" +"group_concatState" "h3CellAreaM2" "h3CellAreaRads2" "h3Distance" @@ -1719,7 +2388,6 @@ "h3IsPentagon" "h3IsResClassIII" "h3IsValid" -"h3kRing" "h3Line" "h3NumHexagons" "h3PointDistKm" @@ -1732,12 +2400,12 @@ "h3ToParent" "h3ToString" "h3UnidirectionalEdgeIsValid" +"h3kRing" "halfMD5" "has" "hasAll" "hasAny" "hasColumnInTable" -"HASH" "hasSubsequence" "hasSubsequenceCaseInsensitive" "hasSubsequenceCaseInsensitiveUTF8" @@ -1748,11 +2416,9 @@ "hasTokenCaseInsensitive" "hasTokenCaseInsensitiveOrNull" "hasTokenOrNull" -"HAVING" -"HDFS" "hex" -"HH" -"HIERARCHICAL" +"hilbertDecode" +"hilbertEncode" "histogram" "histogramArgMax" "histogramArgMin" @@ -1772,73 +2438,33 @@ "hop" "hopEnd" "hopStart" -"HOST" -"hostname" "hostName" -"HOUR" -"HOURS" -"HTTP" +"hostname" "hypot" -"ID" -"IDENTIFIED" "identity" "idnaDecode" "idnaEncode" "if" -"IF EMPTY" -"IF EXISTS" -"IF NOT EXISTS" "ifNotFinite" "ifNull" "ignore" -"IGNORE NULLS" "ilike" -"ILIKE" "in" -"IN" -"INDEX" -"INDEXES" +"inIgnoreSet" "indexHint" "indexOf" -"INDICES" -"INET4" -"INET6" -"INET6_ATON" -"INET6_NTOA" -"INET_ATON" -"INET_NTOA" -"INHERIT" -"inIgnoreSet" "initcap" "initcapUTF8" -"initializeAggregation" -"initial_query_id" "initialQueryID" -"INJECTIVE" -"INNER" -"IN PARTITION" -"INSERT INTO" +"initial_query_id" +"initializeAggregation" "instr" -"INT" -"INT1" -"Int128" -"Int16" -"INT1 SIGNED" -"INT1 UNSIGNED" -"Int256" -"Int32" -"Int64" -"Int8" "intDiv" "intDivOrZero" -"INTEGER" -"INTEGER SIGNED" -"INTEGER UNSIGNED" -"INTERPOLATE" 
-"INTERSECT" -"INTERVAL" -"IntervalDay" -"IntervalHour" +"intExp10" +"intExp2" +"intHash32" +"intHash64" "intervalLengthSum" "intervalLengthSumArgMax" "intervalLengthSumArgMin" @@ -1854,55 +2480,18 @@ "intervalLengthSumResample" "intervalLengthSumSimpleState" "intervalLengthSumState" -"IntervalMicrosecond" -"IntervalMillisecond" -"IntervalMinute" -"IntervalMonth" -"IntervalNanosecond" -"IntervalQuarter" -"IntervalSecond" -"IntervalWeek" -"IntervalYear" -"intExp10" -"intExp2" -"intHash32" -"intHash64" -"INTO OUTFILE" -"INT SIGNED" -"INT UNSIGNED" -"INVISIBLE" -"INVOKER" -"IP" -"IPv4" -"IPv4CIDRToRange" -"IPv4NumToString" -"IPv4NumToStringClassC" -"IPv4StringToNum" -"IPv4StringToNumOrDefault" -"IPv4StringToNumOrNull" -"IPv4ToIPv6" -"IPv6" -"IPv6CIDRToRange" -"IPv6NumToString" -"IPv6StringToNum" -"IPv6StringToNumOrDefault" -"IPv6StringToNumOrNull" "isConstant" "isDecimalOverflow" "isFinite" -"isInfinite" "isIPAddressInRange" "isIPv4String" "isIPv6String" +"isInfinite" "isNaN" "isNotDistinctFrom" -"IS NOT DISTINCT FROM" "isNotNull" -"IS NOT NULL" "isNull" -"IS NULL" "isNullable" -"IS_OBJECT_ID" "isValidJSON" "isValidUTF8" "isZeroOrNull" @@ -1910,40 +2499,11 @@ "jaroWinklerSimilarity" "javaHash" "javaHashUTF16LE" -"JOIN" "joinGet" "joinGetOrNull" -"JSON" -"JSONArrayLength" -"JSON_ARRAY_LENGTH" -"JSON_EXISTS" -"JSONExtract" -"JSONExtractArrayRaw" -"JSONExtractBool" -"JSONExtractFloat" -"JSONExtractInt" -"JSONExtractKeys" -"JSONExtractKeysAndValues" -"JSONExtractKeysAndValuesRaw" -"JSONExtractRaw" -"JSONExtractString" -"JSONExtractUInt" -"JSONHas" -"JSONKey" -"JSONLength" "jsonMergePatch" -"JSON_QUERY" -"JSONType" -"JSON_VALUE" "jumpConsistentHash" "kafkaMurmurHash" -"KERBEROS" -"KEY" -"KEY BY" -"KEYED BY" -"KEYS" -"KILL" -"KIND" "kolmogorovSmirnovTest" "kolmogorovSmirnovTestArgMax" "kolmogorovSmirnovTestArgMin" @@ -1992,14 +2552,6 @@ "kurtSampResample" "kurtSampSimpleState" "kurtSampState" -"L1Distance" -"L1Norm" -"L1Normalize" -"L2Distance" -"L2Norm" -"L2Normalize" -"L2SquaredDistance" -"L2SquaredNorm" "lagInFrame" "lagInFrameArgMax" "lagInFrameArgMin" @@ -2015,7 +2567,6 @@ "lagInFrameResample" "lagInFrameSimpleState" "lagInFrameState" -"LARGE OBJECT" "largestTriangleThreeBuckets" "largestTriangleThreeBucketsArgMax" "largestTriangleThreeBucketsArgMin" @@ -2031,8 +2582,6 @@ "largestTriangleThreeBucketsResample" "largestTriangleThreeBucketsSimpleState" "largestTriangleThreeBucketsState" -"LAST" -"LAST_DAY" "last_value" "last_valueArgMax" "last_valueArgMin" @@ -2046,6 +2595,8 @@ "last_valueOrDefault" "last_valueOrNull" "last_valueResample" +"last_valueSimpleState" +"last_valueState" "last_value_respect_nulls" "last_value_respect_nullsArgMax" "last_value_respect_nullsArgMin" @@ -2061,12 +2612,8 @@ "last_value_respect_nullsResample" "last_value_respect_nullsSimpleState" "last_value_respect_nullsState" -"last_valueSimpleState" -"last_valueState" -"LAYOUT" "lcase" "lcm" -"LDAP" "leadInFrame" "leadInFrameArgMax" "leadInFrameArgMin" @@ -2082,11 +2629,8 @@ "leadInFrameResample" "leadInFrameSimpleState" "leadInFrameState" -"LEADING" "least" "left" -"LEFT" -"LEFT ARRAY JOIN" "leftPad" "leftPadUTF8" "leftUTF8" @@ -2095,42 +2639,23 @@ "lengthUTF8" "less" "lessOrEquals" -"LESS THAN" -"LEVEL" "levenshteinDistance" +"levenshteinDistanceUTF8" "lgamma" -"LIFETIME" -"LIGHTWEIGHT" "like" -"LIKE" -"LIMIT" -"LINEAR" -"LinfDistance" -"LinfNorm" -"LinfNormalize" -"LIST" -"LIVE" "ln" -"LOCAL" "locate" "log" "log10" "log1p" "log2" "logTrace" -"LONGBLOB" -"LONGTEXT" -"LowCardinality" "lowCardinalityIndices" 
"lowCardinalityKeys" "lower" "lowerUTF8" "lpad" -"LpDistance" -"LpNorm" -"LpNormalize" "ltrim" -"LTRIM" "lttb" "lttbArgMax" "lttbArgMin" @@ -2146,10 +2671,6 @@ "lttbResample" "lttbSimpleState" "lttbState" -"M" -"MACNumToString" -"MACStringToNum" -"MACStringToOUI" "makeDate" "makeDate32" "makeDateTime" @@ -2170,7 +2691,6 @@ "mannWhitneyUTestSimpleState" "mannWhitneyUTestState" "map" -"Map" "mapAdd" "mapAll" "mapApply" @@ -2181,7 +2701,6 @@ "mapExtractKeyLike" "mapFilter" "mapFromArrays" -"MAP_FROM_ARRAYS" "mapFromString" "mapKeys" "mapPartialReverseSort" @@ -2193,17 +2712,8 @@ "mapUpdate" "mapValues" "match" -"MATCH" "materialize" -"MATERIALIZE" -"MATERIALIZE COLUMN" -"MATERIALIZED" -"MATERIALIZE INDEX" -"MATERIALIZE PROJECTION" -"MATERIALIZE STATISTIC" -"MATERIALIZE TTL" "max" -"MAX" "max2" "maxArgMax" "maxArgMin" @@ -2264,9 +2774,6 @@ "maxResample" "maxSimpleState" "maxState" -"MCS" -"MD4" -"MD5" "meanZTest" "meanZTestArgMax" "meanZTestArgMin" @@ -2507,23 +3014,9 @@ "medianTimingWeightedResample" "medianTimingWeightedSimpleState" "medianTimingWeightedState" -"MEDIUMBLOB" -"MEDIUMINT" -"MEDIUMINT SIGNED" -"MEDIUMINT UNSIGNED" -"MEDIUMTEXT" -"MEMORY" -"Merge" -"MERGES" "metroHash64" -"MI" -"MICROSECOND" -"MICROSECONDS" "mid" -"MILLISECOND" -"MILLISECONDS" "min" -"MIN" "min2" "minArgMax" "minArgMin" @@ -2558,35 +3051,15 @@ "minSimpleState" "minState" "minus" -"MINUTE" -"MINUTES" "mismatches" -"MM" "mod" -"MOD" -"MODIFY" -"MODIFY COLUMN" -"MODIFY COMMENT" -"MODIFY ORDER BY" -"MODIFY QUERY" -"MODIFY REFRESH" -"MODIFY SAMPLE BY" -"MODIFY SETTING" -"MODIFY SQL SECURITY" -"MODIFY TTL" "modulo" "moduloLegacy" "moduloOrZero" -"MONTH" "monthName" -"MONTHS" "mortonDecode" "mortonEncode" -"MOVE" -"MOVE PART" -"MOVE PARTITION" "movingXXX" -"MS" "multiFuzzyMatchAllIndices" "multiFuzzyMatchAny" "multiFuzzyMatchAnyIndex" @@ -2594,9 +3067,6 @@ "multiMatchAllIndices" "multiMatchAny" "multiMatchAnyIndex" -"multiply" -"multiplyDecimal" -"MultiPolygon" "multiSearchAllPositions" "multiSearchAllPositionsCaseInsensitive" "multiSearchAllPositionsCaseInsensitiveUTF8" @@ -2613,31 +3083,17 @@ "multiSearchFirstPositionCaseInsensitive" "multiSearchFirstPositionCaseInsensitiveUTF8" "multiSearchFirstPositionUTF8" +"multiply" +"multiplyDecimal" "murmurHash2_32" "murmurHash2_64" "murmurHash3_128" "murmurHash3_32" "murmurHash3_64" -"MUTATION" -"N" -"NAME" -"NAMED COLLECTION" -"NANOSECOND" -"NANOSECONDS" -"NATIONAL CHAR" -"NATIONAL CHARACTER" -"NATIONAL CHARACTER LARGE OBJECT" -"NATIONAL CHARACTER VARYING" -"NATIONAL CHAR VARYING" -"NCHAR" -"NCHAR LARGE OBJECT" -"NCHAR VARYING" "negate" "neighbor" "nested" -"Nested" "netloc" -"NEXT" "ngramDistance" "ngramDistanceCaseInsensitive" "ngramDistanceCaseInsensitiveUTF8" @@ -2650,7 +3106,6 @@ "ngramMinHashCaseInsensitive" "ngramMinHashCaseInsensitiveUTF8" "ngramMinHashUTF8" -"ngrams" "ngramSearch" "ngramSearchCaseInsensitive" "ngramSearchCaseInsensitiveUTF8" @@ -2659,10 +3114,7 @@ "ngramSimHashCaseInsensitive" "ngramSimHashCaseInsensitiveUTF8" "ngramSimHashUTF8" -"NO ACTION" -"NO DELAY" -"NO LIMITS" -"NONE" +"ngrams" "nonNegativeDerivative" "nonNegativeDerivativeArgMax" "nonNegativeDerivativeArgMin" @@ -2678,9 +3130,11 @@ "nonNegativeDerivativeResample" "nonNegativeDerivativeSimpleState" "nonNegativeDerivativeState" -"NO_PASSWORD" -"normalizedQueryHash" -"normalizedQueryHashKeepNames" +"normL1" +"normL2" +"normL2Squared" +"normLinf" +"normLp" "normalizeL1" "normalizeL2" "normalizeLinf" @@ -2691,18 +3145,18 @@ "normalizeUTF8NFD" "normalizeUTF8NFKC" "normalizeUTF8NFKD" -"normL1" 
-"normL2" -"normL2Squared" -"normLinf" -"normLp" +"normalizedQueryHash" +"normalizedQueryHashKeepNames" "not" -"NOT" -"NOT BETWEEN" "notEmpty" "notEquals" +"notILike" +"notIn" +"notInIgnoreSet" +"notLike" +"notNullIn" +"notNullInIgnoreSet" "nothing" -"Nothing" "nothingArgMax" "nothingArgMin" "nothingArray" @@ -2746,22 +3200,9 @@ "nothingUInt64Resample" "nothingUInt64SimpleState" "nothingUInt64State" -"NOT IDENTIFIED" -"notILike" -"NOT ILIKE" -"notIn" -"NOT IN" -"notInIgnoreSet" -"NOT KEYED" -"notLike" -"NOT LIKE" -"notNullIn" -"notNullInIgnoreSet" -"NOT OVERRIDABLE" "now" "now64" "nowInBlock" -"NS" "nth_value" "nth_valueArgMax" "nth_valueArgMin" @@ -2792,31 +3233,10 @@ "ntileResample" "ntileSimpleState" "ntileState" -"Null" -"NULL" -"Nullable" "nullIf" "nullIn" "nullInIgnoreSet" -"NULLS" -"NUMERIC" -"NVARCHAR" -"Object" -"OCTET_LENGTH" -"OFFSET" -"ON" -"ON DELETE" -"ONLY" -"ON UPDATE" -"ON VOLUME" -"OPTIMIZE TABLE" "or" -"OR" -"ORDER BY" -"OR REPLACE" -"OUTER" -"OVER" -"OVERRIDABLE" "parseDateTime" "parseDateTime32BestEffort" "parseDateTime32BestEffortOrNull" @@ -2838,32 +3258,49 @@ "parseDateTimeInJodaSyntaxOrZero" "parseDateTimeOrNull" "parseDateTimeOrZero" +"parseReadableSize" +"parseReadableSizeOrNull" +"parseReadableSizeOrZero" "parseTimeDelta" -"PART" -"PARTIAL" -"PARTITION" -"PARTITION BY" +"partitionID" "partitionId" -"PARTITIONS" -"PART_MOVE_TO_SHARD" -"PASTE" "path" "pathFull" -"PERIODIC REFRESH" -"PERMANENTLY" -"PERMISSIVE" -"PERSISTENT" -"PERSISTENT SEQUENTIAL" +"percentRank" +"percentRankArgMax" +"percentRankArgMin" +"percentRankArray" +"percentRankDistinct" +"percentRankForEach" +"percentRankIf" +"percentRankMap" +"percentRankMerge" +"percentRankNull" +"percentRankOrDefault" +"percentRankOrNull" +"percentRankResample" +"percentRankSimpleState" +"percentRankState" +"percent_rank" +"percent_rankArgMax" +"percent_rankArgMin" +"percent_rankArray" +"percent_rankDistinct" +"percent_rankForEach" +"percent_rankIf" +"percent_rankMap" +"percent_rankMerge" +"percent_rankNull" +"percent_rankOrDefault" +"percent_rankOrNull" +"percent_rankResample" +"percent_rankSimpleState" +"percent_rankState" "pi" -"PIPELINE" -"PLAINTEXT_PASSWORD" -"PLAN" "plus" "pmod" -"Point" "pointInEllipses" "pointInPolygon" -"Polygon" "polygonAreaCartesian" "polygonAreaSpherical" "polygonConvexHullCartesian" @@ -2880,32 +3317,21 @@ "polygonsUnionSpherical" "polygonsWithinCartesian" "polygonsWithinSpherical" -"POPULATE" "port" "portRFC" "position" "positionCaseInsensitive" "positionCaseInsensitiveUTF8" "positionUTF8" -"positive_modulo" "positiveModulo" +"positive_modulo" "pow" "power" -"PRECEDING" -"PRECISION" -"PREWHERE" -"PRIMARY" -"PRIMARY KEY" -"PROFILE" -"PROJECTION" +"printf" "proportionsZTest" -"Protobuf" "protocol" -"PULL" "punycodeDecode" "punycodeEncode" -"Q" -"QQ" "quantile" "quantileArgMax" "quantileArgMin" @@ -3099,6 +3525,68 @@ "quantileOrDefault" "quantileOrNull" "quantileResample" +"quantileSimpleState" +"quantileState" +"quantileTDigest" +"quantileTDigestArgMax" +"quantileTDigestArgMin" +"quantileTDigestArray" +"quantileTDigestDistinct" +"quantileTDigestForEach" +"quantileTDigestIf" +"quantileTDigestMap" +"quantileTDigestMerge" +"quantileTDigestNull" +"quantileTDigestOrDefault" +"quantileTDigestOrNull" +"quantileTDigestResample" +"quantileTDigestSimpleState" +"quantileTDigestState" +"quantileTDigestWeighted" +"quantileTDigestWeightedArgMax" +"quantileTDigestWeightedArgMin" +"quantileTDigestWeightedArray" +"quantileTDigestWeightedDistinct" +"quantileTDigestWeightedForEach" 
+"quantileTDigestWeightedIf" +"quantileTDigestWeightedMap" +"quantileTDigestWeightedMerge" +"quantileTDigestWeightedNull" +"quantileTDigestWeightedOrDefault" +"quantileTDigestWeightedOrNull" +"quantileTDigestWeightedResample" +"quantileTDigestWeightedSimpleState" +"quantileTDigestWeightedState" +"quantileTiming" +"quantileTimingArgMax" +"quantileTimingArgMin" +"quantileTimingArray" +"quantileTimingDistinct" +"quantileTimingForEach" +"quantileTimingIf" +"quantileTimingMap" +"quantileTimingMerge" +"quantileTimingNull" +"quantileTimingOrDefault" +"quantileTimingOrNull" +"quantileTimingResample" +"quantileTimingSimpleState" +"quantileTimingState" +"quantileTimingWeighted" +"quantileTimingWeightedArgMax" +"quantileTimingWeightedArgMin" +"quantileTimingWeightedArray" +"quantileTimingWeightedDistinct" +"quantileTimingWeightedForEach" +"quantileTimingWeightedIf" +"quantileTimingWeightedMap" +"quantileTimingWeightedMerge" +"quantileTimingWeightedNull" +"quantileTimingWeightedOrDefault" +"quantileTimingWeightedOrNull" +"quantileTimingWeightedResample" +"quantileTimingWeightedSimpleState" +"quantileTimingWeightedState" "quantiles" "quantilesArgMax" "quantilesArgMin" @@ -3271,7 +3759,6 @@ "quantilesGKSimpleState" "quantilesGKState" "quantilesIf" -"quantileSimpleState" "quantilesInterpolatedWeighted" "quantilesInterpolatedWeightedArgMax" "quantilesInterpolatedWeightedArgMin" @@ -3295,7 +3782,6 @@ "quantilesResample" "quantilesSimpleState" "quantilesState" -"quantileState" "quantilesTDigest" "quantilesTDigestArgMax" "quantilesTDigestArgMin" @@ -3356,75 +3842,10 @@ "quantilesTimingWeightedResample" "quantilesTimingWeightedSimpleState" "quantilesTimingWeightedState" -"quantileTDigest" -"quantileTDigestArgMax" -"quantileTDigestArgMin" -"quantileTDigestArray" -"quantileTDigestDistinct" -"quantileTDigestForEach" -"quantileTDigestIf" -"quantileTDigestMap" -"quantileTDigestMerge" -"quantileTDigestNull" -"quantileTDigestOrDefault" -"quantileTDigestOrNull" -"quantileTDigestResample" -"quantileTDigestSimpleState" -"quantileTDigestState" -"quantileTDigestWeighted" -"quantileTDigestWeightedArgMax" -"quantileTDigestWeightedArgMin" -"quantileTDigestWeightedArray" -"quantileTDigestWeightedDistinct" -"quantileTDigestWeightedForEach" -"quantileTDigestWeightedIf" -"quantileTDigestWeightedMap" -"quantileTDigestWeightedMerge" -"quantileTDigestWeightedNull" -"quantileTDigestWeightedOrDefault" -"quantileTDigestWeightedOrNull" -"quantileTDigestWeightedResample" -"quantileTDigestWeightedSimpleState" -"quantileTDigestWeightedState" -"quantileTiming" -"quantileTimingArgMax" -"quantileTimingArgMin" -"quantileTimingArray" -"quantileTimingDistinct" -"quantileTimingForEach" -"quantileTimingIf" -"quantileTimingMap" -"quantileTimingMerge" -"quantileTimingNull" -"quantileTimingOrDefault" -"quantileTimingOrNull" -"quantileTimingResample" -"quantileTimingSimpleState" -"quantileTimingState" -"quantileTimingWeighted" -"quantileTimingWeightedArgMax" -"quantileTimingWeightedArgMin" -"quantileTimingWeightedArray" -"quantileTimingWeightedDistinct" -"quantileTimingWeightedForEach" -"quantileTimingWeightedIf" -"quantileTimingWeightedMap" -"quantileTimingWeightedMerge" -"quantileTimingWeightedNull" -"quantileTimingWeightedOrDefault" -"quantileTimingWeightedOrNull" -"quantileTimingWeightedResample" -"quantileTimingWeightedSimpleState" -"quantileTimingWeightedState" -"QUARTER" -"QUARTERS" -"QUERY" -"query_id" "queryID" "queryString" "queryStringAndFragment" -"QUERY TREE" -"QUOTA" +"query_id" "radians" "rand" "rand32" @@ -3439,17 +3860,14 @@ 
"randLogNormal" "randNegativeBinomial" "randNormal" -"randomFixedString" -"RANDOMIZED" -"RANDOMIZE FOR" -"randomPrintableASCII" -"randomString" -"randomStringUTF8" "randPoisson" "randStudentT" "randUniform" +"randomFixedString" +"randomPrintableASCII" +"randomString" +"randomStringUTF8" "range" -"RANGE" "rank" "rankArgMax" "rankArgMin" @@ -3480,22 +3898,14 @@ "rankResample" "rankSimpleState" "rankState" -"READONLY" +"readWKTLineString" +"readWKTMultiLineString" "readWKTMultiPolygon" "readWKTPoint" "readWKTPolygon" "readWKTRing" -"REAL" -"REALM" -"RECOMPRESS" -"REFERENCES" -"REFRESH" -"REGEXP" "regexpExtract" -"REGEXP_EXTRACT" -"REGEXP_MATCHES" "regexpQuoteMeta" -"REGEXP_REPLACE" "regionHierarchy" "regionIn" "regionToArea" @@ -3526,31 +3936,13 @@ "reinterpretAsUInt64" "reinterpretAsUInt8" "reinterpretAsUUID" -"REMOVE" -"REMOVE SAMPLE BY" -"REMOVE TTL" -"RENAME" -"RENAME COLUMN" -"RENAME DATABASE" -"RENAME DICTIONARY" -"RENAME TABLE" -"RENAME TO" "repeat" "replace" -"REPLACE" "replaceAll" "replaceOne" -"REPLACE PARTITION" "replaceRegexpAll" "replaceRegexpOne" "replicate" -"Resample" -"RESET SETTING" -"RESPECT NULLS" -"RESTORE" -"RESTRICT" -"RESTRICTIVE" -"RESUME" "retention" "retentionArgMax" "retentionArgMin" @@ -3569,22 +3961,18 @@ "reverse" "reverseUTF8" "revision" -"REVOKE" "right" -"RIGHT" "rightPad" "rightPadUTF8" "rightUTF8" -"Ring" -"ROLLBACK" -"ROLLUP" "round" "roundAge" "roundBankers" "roundDown" "roundDuration" "roundToExp2" -"ROW" +"rowNumberInAllBlocks" +"rowNumberInBlock" "row_number" "row_numberArgMax" "row_numberArgMin" @@ -3592,8 +3980,6 @@ "row_numberDistinct" "row_numberForEach" "row_numberIf" -"rowNumberInAllBlocks" -"rowNumberInBlock" "row_numberMap" "row_numberMerge" "row_numberNull" @@ -3602,15 +3988,12 @@ "row_numberResample" "row_numberSimpleState" "row_numberState" -"ROWS" "rpad" "rtrim" -"RTRIM" "runningAccumulate" "runningConcurrency" "runningDifference" "runningDifferenceStartingWithFirstValue" -"S" "s2CapContains" "s2CapUnion" "s2CellsIntersect" @@ -3620,18 +4003,7 @@ "s2RectIntersection" "s2RectUnion" "s2ToGeo" -"S3" -"SALT" -"SAMPLE" -"SAMPLE BY" "scalarProduct" -"__scalarSubqueryResult" -"SCHEMA" -"SCHEME" -"SECOND" -"SECONDS" -"SELECT" -"SEMI" "sequenceCount" "sequenceCountArgMax" "sequenceCountArgMin" @@ -3680,44 +4052,14 @@ "seriesDecomposeSTL" "seriesOutliersDetectTukey" "seriesPeriodDetectFFT" -"SERVER" -"serverTimezone" "serverTimeZone" +"serverTimezone" "serverUUID" -"SET" -"SET DEFAULT" -"SET DEFAULT ROLE" -"SET FAKE TIME" -"SET NULL" -"SET ROLE" -"SET ROLE DEFAULT" -"SETTINGS" -"SET TRANSACTION SNAPSHOT" -"SHA1" -"SHA224" -"SHA256" -"SHA256_HASH" -"SHA256_PASSWORD" -"SHA384" -"SHA512" -"SHA512_256" "shardCount" "shardNum" -"SHOW" -"SHOW ACCESS" "showCertificate" -"SHOW CREATE" -"SHOW ENGINES" -"SHOW FUNCTIONS" -"SHOW GRANTS" -"SHOW PRIVILEGES" -"SHOW PROCESSLIST" -"SHOW SETTING" "sigmoid" "sign" -"SIGNED" -"SIMPLE" -"SimpleAggregateFunction" "simpleJSONExtractBool" "simpleJSONExtractFloat" "simpleJSONExtractInt" @@ -3741,7 +4083,6 @@ "simpleLinearRegressionSimpleState" "simpleLinearRegressionState" "sin" -"SINGLE" "singleValueOrNull" "singleValueOrNullArgMax" "singleValueOrNullArgMin" @@ -3796,45 +4137,42 @@ "skewSampState" "sleep" "sleepEachRow" -"SMALLINT" -"SMALLINT SIGNED" -"SMALLINT UNSIGNED" +"snowflakeIDToDateTime" +"snowflakeIDToDateTime64" "snowflakeToDateTime" "snowflakeToDateTime64" "soundex" -"SOURCE" "space" -"sparkbar" "sparkBar" -"sparkbarArgMax" "sparkBarArgMax" -"sparkbarArgMin" "sparkBarArgMin" -"sparkbarArray" "sparkBarArray" 
-"sparkbarDistinct" "sparkBarDistinct" -"sparkbarForEach" "sparkBarForEach" -"sparkbarIf" "sparkBarIf" -"sparkbarMap" "sparkBarMap" -"sparkbarMerge" "sparkBarMerge" -"sparkbarNull" "sparkBarNull" -"sparkbarOrDefault" "sparkBarOrDefault" -"sparkbarOrNull" "sparkBarOrNull" -"sparkbarResample" "sparkBarResample" -"sparkbarSimpleState" "sparkBarSimpleState" -"sparkbarState" "sparkBarState" -"SPATIAL" +"sparkbar" +"sparkbarArgMax" +"sparkbarArgMin" +"sparkbarArray" +"sparkbarDistinct" +"sparkbarForEach" +"sparkbarIf" +"sparkbarMap" +"sparkbarMerge" +"sparkbarNull" +"sparkbarOrDefault" +"sparkbarOrNull" +"sparkbarResample" +"sparkbarSimpleState" +"sparkbarState" "splitByAlpha" "splitByChar" "splitByNonAlpha" @@ -3844,58 +4182,23 @@ "sqid" "sqidDecode" "sqidEncode" -"SQL SECURITY" -"SQL_TSI_DAY" -"SQL_TSI_HOUR" -"SQL_TSI_MICROSECOND" -"SQL_TSI_MILLISECOND" -"SQL_TSI_MINUTE" -"SQL_TSI_MONTH" -"SQL_TSI_NANOSECOND" -"SQL_TSI_QUARTER" -"SQL_TSI_SECOND" -"SQL_TSI_WEEK" -"SQL_TSI_YEAR" "sqrt" -"SS" -"SSH_KEY" -"SSL_CERTIFICATE" "startsWith" "startsWithUTF8" -"State" -"STATISTIC" -"STD" -"STDArgMax" -"STDArgMin" -"STDArray" "stddevPop" -"STDDEV_POP" "stddevPopArgMax" -"STDDEV_POPArgMax" "stddevPopArgMin" -"STDDEV_POPArgMin" "stddevPopArray" -"STDDEV_POPArray" "stddevPopDistinct" -"STDDEV_POPDistinct" "stddevPopForEach" -"STDDEV_POPForEach" "stddevPopIf" -"STDDEV_POPIf" "stddevPopMap" -"STDDEV_POPMap" "stddevPopMerge" -"STDDEV_POPMerge" "stddevPopNull" -"STDDEV_POPNull" "stddevPopOrDefault" -"STDDEV_POPOrDefault" "stddevPopOrNull" -"STDDEV_POPOrNull" "stddevPopResample" -"STDDEV_POPResample" "stddevPopSimpleState" -"STDDEV_POPSimpleState" "stddevPopStable" "stddevPopStableArgMax" "stddevPopStableArgMin" @@ -3912,35 +4215,20 @@ "stddevPopStableSimpleState" "stddevPopStableState" "stddevPopState" -"STDDEV_POPState" "stddevSamp" -"STDDEV_SAMP" "stddevSampArgMax" -"STDDEV_SAMPArgMax" "stddevSampArgMin" -"STDDEV_SAMPArgMin" "stddevSampArray" -"STDDEV_SAMPArray" "stddevSampDistinct" -"STDDEV_SAMPDistinct" "stddevSampForEach" -"STDDEV_SAMPForEach" "stddevSampIf" -"STDDEV_SAMPIf" "stddevSampMap" -"STDDEV_SAMPMap" "stddevSampMerge" -"STDDEV_SAMPMerge" "stddevSampNull" -"STDDEV_SAMPNull" "stddevSampOrDefault" -"STDDEV_SAMPOrDefault" "stddevSampOrNull" -"STDDEV_SAMPOrNull" "stddevSampResample" -"STDDEV_SAMPResample" "stddevSampSimpleState" -"STDDEV_SAMPSimpleState" "stddevSampStable" "stddevSampStableArgMax" "stddevSampStableArgMin" @@ -3957,20 +4245,7 @@ "stddevSampStableSimpleState" "stddevSampStableState" "stddevSampState" -"STDDEV_SAMPState" -"STDDistinct" -"STDForEach" -"STDIf" -"STDMap" -"STDMerge" -"STDNull" -"STDOrDefault" -"STDOrNull" -"STDResample" -"STDSimpleState" -"STDState" "stem" -"STEP" "stochasticLinearRegression" "stochasticLinearRegressionArgMax" "stochasticLinearRegressionArgMin" @@ -4001,15 +4276,11 @@ "stochasticLogisticRegressionResample" "stochasticLogisticRegressionSimpleState" "stochasticLogisticRegressionState" -"STORAGE" -"STRICT" -"STRICTLY_ASCENDING" -"String" +"str_to_date" +"str_to_map" "stringJaccardIndex" "stringJaccardIndexUTF8" "stringToH3" -"str_to_date" -"str_to_map" "structureToCapnProtoSchema" "structureToProtobufSchema" "studentTTest" @@ -4029,14 +4300,9 @@ "studentTTestState" "subBitmap" "subDate" -"SUBPARTITION" -"SUBPARTITION BY" -"SUBPARTITIONS" "substr" "substring" -"SUBSTRING" "substringIndex" -"SUBSTRING_INDEX" "substringIndexUTF8" "substringUTF8" "subtractDays" @@ -4120,21 +4386,6 @@ "sumMapFilteredWithOverflowResample" "sumMapFilteredWithOverflowSimpleState" 
"sumMapFilteredWithOverflowState" -"sumMappedArrays" -"sumMappedArraysArgMax" -"sumMappedArraysArgMin" -"sumMappedArraysArray" -"sumMappedArraysDistinct" -"sumMappedArraysForEach" -"sumMappedArraysIf" -"sumMappedArraysMap" -"sumMappedArraysMerge" -"sumMappedArraysNull" -"sumMappedArraysOrDefault" -"sumMappedArraysOrNull" -"sumMappedArraysResample" -"sumMappedArraysSimpleState" -"sumMappedArraysState" "sumMapWithOverflow" "sumMapWithOverflowArgMax" "sumMapWithOverflowArgMin" @@ -4150,6 +4401,21 @@ "sumMapWithOverflowResample" "sumMapWithOverflowSimpleState" "sumMapWithOverflowState" +"sumMappedArrays" +"sumMappedArraysArgMax" +"sumMappedArraysArgMin" +"sumMappedArraysArray" +"sumMappedArraysDistinct" +"sumMappedArraysForEach" +"sumMappedArraysIf" +"sumMappedArraysMap" +"sumMappedArraysMerge" +"sumMappedArraysNull" +"sumMappedArraysOrDefault" +"sumMappedArraysOrNull" +"sumMappedArraysResample" +"sumMappedArraysSimpleState" +"sumMappedArraysState" "sumMerge" "sumNull" "sumOrDefault" @@ -4172,23 +4438,11 @@ "sumWithOverflowResample" "sumWithOverflowSimpleState" "sumWithOverflowState" -"SUSPEND" "svg" -"SVG" -"SYNC" "synonyms" -"SYNTAX" -"SYSTEM" -"TABLE" -"TABLE OVERRIDE" -"TABLES" "tan" "tanh" "tcpPort" -"TEMPORARY" -"TEMPORARY TABLE" -"TEST" -"TEXT" "tgamma" "theilsU" "theilsUArgMax" @@ -4205,36 +4459,20 @@ "theilsUResample" "theilsUSimpleState" "theilsUState" -"THEN" "throwIf" "tid" -"TIME" "timeDiff" "timeSlot" "timeSlots" -"timestamp" -"TIMESTAMP" -"TIMESTAMP_ADD" -"TIMESTAMPADD" -"timestamp_diff" -"timestampDiff" -"TIMESTAMP_DIFF" -"TIMESTAMPDIFF" -"TIMESTAMP_SUB" -"TIMESTAMPSUB" -"timezone" "timeZone" -"timezoneOf" "timeZoneOf" -"timezoneOffset" "timeZoneOffset" -"TINYBLOB" -"TINYINT" -"TINYINT SIGNED" -"TINYINT UNSIGNED" -"TINYTEXT" -"TO" -"TO_BASE64" +"timestamp" +"timestampDiff" +"timestamp_diff" +"timezone" +"timezoneOf" +"timezoneOffset" "toBool" "toColumnTypeName" "toDate" @@ -4254,11 +4492,9 @@ "toDateTimeOrDefault" "toDateTimeOrNull" "toDateTimeOrZero" -"today" "toDayOfMonth" "toDayOfWeek" "toDayOfYear" -"TO_DAYS" "toDaysSinceYearZero" "toDecimal128" "toDecimal128OrDefault" @@ -4277,7 +4513,6 @@ "toDecimal64OrNull" "toDecimal64OrZero" "toDecimalString" -"TO DISK" "toFixedString" "toFloat32" "toFloat32OrDefault" @@ -4288,7 +4523,16 @@ "toFloat64OrNull" "toFloat64OrZero" "toHour" -"TO INNER UUID" +"toIPv4" +"toIPv4OrDefault" +"toIPv4OrNull" +"toIPv4OrZero" +"toIPv6" +"toIPv6OrDefault" +"toIPv6OrNull" +"toIPv6OrZero" +"toISOWeek" +"toISOYear" "toInt128" "toInt128OrDefault" "toInt128OrNull" @@ -4324,18 +4568,7 @@ "toIntervalSecond" "toIntervalWeek" "toIntervalYear" -"toIPv4" -"toIPv4OrDefault" -"toIPv4OrNull" -"toIPv4OrZero" -"toIPv6" -"toIPv6OrDefault" -"toIPv6OrNull" -"toIPv6OrZero" -"toISOWeek" -"toISOYear" "toJSONString" -"tokens" "toLastDayOfMonth" "toLastDayOfWeek" "toLowCardinality" @@ -4346,7 +4579,82 @@ "toMonday" "toMonth" "toNullable" -"TOP" +"toQuarter" +"toRelativeDayNum" +"toRelativeHourNum" +"toRelativeMinuteNum" +"toRelativeMonthNum" +"toRelativeQuarterNum" +"toRelativeSecondNum" +"toRelativeWeekNum" +"toRelativeYearNum" +"toSecond" +"toStartOfDay" +"toStartOfFifteenMinutes" +"toStartOfFiveMinute" +"toStartOfFiveMinutes" +"toStartOfHour" +"toStartOfISOYear" +"toStartOfInterval" +"toStartOfMicrosecond" +"toStartOfMillisecond" +"toStartOfMinute" +"toStartOfMonth" +"toStartOfNanosecond" +"toStartOfQuarter" +"toStartOfSecond" +"toStartOfTenMinutes" +"toStartOfWeek" +"toStartOfYear" +"toString" +"toStringCutToZero" +"toTime" +"toTimeZone" +"toTimezone" +"toTypeName" 
+"toUInt128" +"toUInt128OrDefault" +"toUInt128OrNull" +"toUInt128OrZero" +"toUInt16" +"toUInt16OrDefault" +"toUInt16OrNull" +"toUInt16OrZero" +"toUInt256" +"toUInt256OrDefault" +"toUInt256OrNull" +"toUInt256OrZero" +"toUInt32" +"toUInt32OrDefault" +"toUInt32OrNull" +"toUInt32OrZero" +"toUInt64" +"toUInt64OrDefault" +"toUInt64OrNull" +"toUInt64OrZero" +"toUInt8" +"toUInt8OrDefault" +"toUInt8OrNull" +"toUInt8OrZero" +"toUTCTimestamp" +"toUUID" +"toUUIDOrDefault" +"toUUIDOrNull" +"toUUIDOrZero" +"toUnixTimestamp" +"toUnixTimestamp64Micro" +"toUnixTimestamp64Milli" +"toUnixTimestamp64Nano" +"toValidUTF8" +"toWeek" +"toYYYYMM" +"toYYYYMMDD" +"toYYYYMMDDhhmmss" +"toYear" +"toYearWeek" +"to_utc_timestamp" +"today" +"tokens" "topK" "topKArgMax" "topKArgMin" @@ -4379,115 +4687,28 @@ "topKWeightedState" "topLevelDomain" "topLevelDomainRFC" -"toQuarter" -"toRelativeDayNum" -"toRelativeHourNum" -"toRelativeMinuteNum" -"toRelativeMonthNum" -"toRelativeQuarterNum" -"toRelativeSecondNum" -"toRelativeWeekNum" -"toRelativeYearNum" -"toSecond" -"TO SHARD" -"toStartOfDay" -"toStartOfFifteenMinutes" -"toStartOfFiveMinute" -"toStartOfFiveMinutes" -"toStartOfHour" -"toStartOfInterval" -"toStartOfISOYear" -"toStartOfMicrosecond" -"toStartOfMillisecond" -"toStartOfMinute" -"toStartOfMonth" -"toStartOfNanosecond" -"toStartOfQuarter" -"toStartOfSecond" -"toStartOfTenMinutes" -"toStartOfWeek" -"toStartOfYear" -"toString" -"toStringCutToZero" -"TO TABLE" -"TOTALS" -"toTime" -"toTimezone" -"toTimeZone" -"toTypeName" -"toUInt128" -"toUInt128OrDefault" -"toUInt128OrNull" -"toUInt128OrZero" -"toUInt16" -"toUInt16OrDefault" -"toUInt16OrNull" -"toUInt16OrZero" -"toUInt256" -"toUInt256OrDefault" -"toUInt256OrNull" -"toUInt256OrZero" -"toUInt32" -"toUInt32OrDefault" -"toUInt32OrNull" -"toUInt32OrZero" -"toUInt64" -"toUInt64OrDefault" -"toUInt64OrNull" -"toUInt64OrZero" -"toUInt8" -"toUInt8OrDefault" -"toUInt8OrNull" -"toUInt8OrZero" -"TO_UNIXTIME" -"toUnixTimestamp" -"toUnixTimestamp64Micro" -"toUnixTimestamp64Milli" -"toUnixTimestamp64Nano" -"to_utc_timestamp" -"toUTCTimestamp" -"toUUID" -"toUUIDOrDefault" -"toUUIDOrNull" -"toUUIDOrZero" -"toValidUTF8" -"TO VOLUME" -"toWeek" -"toYear" -"toYearWeek" -"toYYYYMM" -"toYYYYMMDD" -"toYYYYMMDDhhmmss" -"TRACKING ONLY" -"TRAILING" -"TRANSACTION" "transactionID" "transactionLatestSnapshot" "transactionOldestSnapshot" "transform" "translate" "translateUTF8" -"TRIGGER" "trim" -"TRIM" "trimBoth" "trimLeft" "trimRight" -"TRUE" "trunc" "truncate" -"TRUNCATE" "tryBase58Decode" "tryBase64Decode" +"tryBase64URLDecode" "tryDecrypt" "tryIdnaEncode" "tryPunycodeDecode" -"TTL" "tumble" "tumbleEnd" "tumbleStart" "tuple" -"Tuple" "tupleConcat" "tupleDivide" "tupleDivideByNumber" @@ -4502,25 +4723,13 @@ "tupleModuloByNumber" "tupleMultiply" "tupleMultiplyByNumber" +"tupleNames" "tupleNegate" "tuplePlus" "tupleToNameValuePairs" -"TYPE" -"TYPEOF" "ucase" -"UInt128" -"UInt16" -"UInt256" -"UInt32" -"UInt64" -"UInt8" -"ULIDStringToDateTime" "unbin" -"UNBOUNDED" -"UNDROP" -"UNFREEZE" "unhex" -"UNION" "uniq" "uniqArgMax" "uniqArgMin" @@ -4614,7 +4823,6 @@ "uniqThetaSimpleState" "uniqThetaState" "uniqThetaUnion" -"UNIQUE" "uniqUpTo" "uniqUpToArgMax" "uniqUpToArgMin" @@ -4630,61 +4838,25 @@ "uniqUpToResample" "uniqUpToSimpleState" "uniqUpToState" -"UNSET FAKE TIME" -"UNSIGNED" -"UPDATE" "upper" "upperUTF8" "uptime" -"URL" -"URLHash" -"URLHierarchy" -"URLPathHierarchy" -"USE" "user" -"USING" -"UTC_timestamp" -"UTCTimestamp" -"UUID" -"UUIDNumToString" -"UUIDStringToNum" "validateNestedArraySizes" -"VALID 
UNTIL" -"VALUES" -"VARBINARY" -"VARCHAR" -"VARCHAR2" -"Variant" -"variantElement" -"variantType" "varPop" -"VAR_POP" "varPopArgMax" -"VAR_POPArgMax" "varPopArgMin" -"VAR_POPArgMin" "varPopArray" -"VAR_POPArray" "varPopDistinct" -"VAR_POPDistinct" "varPopForEach" -"VAR_POPForEach" "varPopIf" -"VAR_POPIf" "varPopMap" -"VAR_POPMap" "varPopMerge" -"VAR_POPMerge" "varPopNull" -"VAR_POPNull" "varPopOrDefault" -"VAR_POPOrDefault" "varPopOrNull" -"VAR_POPOrNull" "varPopResample" -"VAR_POPResample" "varPopSimpleState" -"VAR_POPSimpleState" "varPopStable" "varPopStableArgMax" "varPopStableArgMin" @@ -4701,35 +4873,20 @@ "varPopStableSimpleState" "varPopStableState" "varPopState" -"VAR_POPState" "varSamp" -"VAR_SAMP" "varSampArgMax" -"VAR_SAMPArgMax" "varSampArgMin" -"VAR_SAMPArgMin" "varSampArray" -"VAR_SAMPArray" "varSampDistinct" -"VAR_SAMPDistinct" "varSampForEach" -"VAR_SAMPForEach" "varSampIf" -"VAR_SAMPIf" "varSampMap" -"VAR_SAMPMap" "varSampMerge" -"VAR_SAMPMerge" "varSampNull" -"VAR_SAMPNull" "varSampOrDefault" -"VAR_SAMPOrDefault" "varSampOrNull" -"VAR_SAMPOrNull" "varSampResample" -"VAR_SAMPResample" "varSampSimpleState" -"VAR_SAMPSimpleState" "varSampStable" "varSampStableArgMax" "varSampStableArgMin" @@ -4746,13 +4903,11 @@ "varSampStableSimpleState" "varSampStableState" "varSampState" -"VAR_SAMPState" -"VARYING" +"variantElement" +"variantType" "vectorDifference" "vectorSum" "version" -"VIEW" -"VISIBLE" "visibleWidth" "visitParamExtractBool" "visitParamExtractFloat" @@ -4761,11 +4916,7 @@ "visitParamExtractString" "visitParamExtractUInt" "visitParamHas" -"WATCH" -"WATERMARK" "week" -"WEEK" -"WEEKS" "welchTTest" "welchTTestArgMax" "welchTTestArgMin" @@ -4781,11 +4932,8 @@ "welchTTestResample" "welchTTestSimpleState" "welchTTestState" -"WHEN" -"WHERE" -"width_bucket" "widthBucket" -"WINDOW" +"width_bucket" "windowFunnel" "windowFunnelArgMax" "windowFunnelArgMin" @@ -4802,16 +4950,6 @@ "windowFunnelSimpleState" "windowFunnelState" "windowID" -"WITH" -"WITH ADMIN OPTION" -"WITH CHECK" -"WITH FILL" -"WITH GRANT OPTION" -"with_itemindex" -"WITH NAME" -"WITH REPLACE OPTION" -"WITH TIES" -"WK" "wkt" "wordShingleMinHash" "wordShingleMinHashArg" @@ -4825,23 +4963,12 @@ "wordShingleSimHashCaseInsensitive" "wordShingleSimHashCaseInsensitiveUTF8" "wordShingleSimHashUTF8" -"WRITABLE" -"WW" "wyHash64" "xor" -"xxh3" "xxHash32" "xxHash64" +"xxh3" "yandexConsistentHash" -"YEAR" -"YEARS" "yearweek" "yesterday" -"YY" -"YYYY" -"YYYYMMDDhhmmssToDateTime" -"YYYYMMDDhhmmssToDateTime64" -"YYYYMMDDToDate" -"YYYYMMDDToDate32" -"ZKPATH" "zookeeperSessionUptime" diff --git a/tests/fuzz/dictionaries/datatypes.dict b/tests/fuzz/dictionaries/datatypes.dict index a01a94fd3e3..797905203b2 100644 --- a/tests/fuzz/dictionaries/datatypes.dict +++ b/tests/fuzz/dictionaries/datatypes.dict @@ -31,6 +31,7 @@ "Decimal256" "Decimal32" "Decimal64" +"Dynamic" "ENUM" "Enum" "Enum16" @@ -74,6 +75,7 @@ "JSON" "LONGBLOB" "LONGTEXT" +"LineString" "LowCardinality" "MEDIUMBLOB" "MEDIUMINT" @@ -81,6 +83,7 @@ "MEDIUMINT UNSIGNED" "MEDIUMTEXT" "Map" +"MultiLineString" "MultiPolygon" "NATIONAL CHAR" "NATIONAL CHAR VARYING" @@ -132,4 +135,3 @@ "YEAR" "bool" "boolean" -"Dynamic" diff --git a/tests/fuzz/dictionaries/functions.dict b/tests/fuzz/dictionaries/functions.dict index 6f2a88c22fa..e562595fb67 100644 --- a/tests/fuzz/dictionaries/functions.dict +++ b/tests/fuzz/dictionaries/functions.dict @@ -126,6 +126,7 @@ "JSONHas" "JSONKey" "JSONLength" +"JSONMergePatch" "JSONType" "JSON_ARRAY_LENGTH" "JSON_EXISTS" @@ -227,6 +228,8 @@ 
"UTC_timestamp" "UUIDNumToString" "UUIDStringToNum" +"UUIDToNum" +"UUIDv7ToDateTime" "VAR_POP" "VAR_POPArgMax" "VAR_POPArgMin" @@ -263,6 +266,7 @@ "YYYYMMDDhhmmssToDateTime" "YYYYMMDDhhmmssToDateTime64" "_CAST" +"__actionName" "__bitBoolMaskAnd" "__bitBoolMaskOr" "__bitSwapLastTwo" @@ -660,6 +664,8 @@ "base58Encode" "base64Decode" "base64Encode" +"base64URLDecode" +"base64URLEncode" "basename" "bin" "bitAnd" @@ -744,8 +750,15 @@ "cbrt" "ceil" "ceiling" +"changeDay" +"changeHour" +"changeMinute" +"changeMonth" +"changeSecond" +"changeYear" "char" "cityHash64" +"clamp" "coalesce" "concat" "concatAssumeInjective" @@ -970,6 +983,7 @@ "current_date" "current_schemas" "current_timestamp" +"current_user" "cutFragment" "cutIPv6" "cutQueryString" @@ -988,7 +1002,9 @@ "dateDiff" "dateName" "dateTime64ToSnowflake" +"dateTime64ToSnowflakeID" "dateTimeToSnowflake" +"dateTimeToSnowflakeID" "dateTrunc" "date_diff" "decodeHTMLComponent" @@ -1032,6 +1048,21 @@ "deltaSumTimestampSimpleState" "deltaSumTimestampState" "demangle" +"denseRank" +"denseRankArgMax" +"denseRankArgMin" +"denseRankArray" +"denseRankDistinct" +"denseRankForEach" +"denseRankIf" +"denseRankMap" +"denseRankMerge" +"denseRankNull" +"denseRankOrDefault" +"denseRankOrNull" +"denseRankResample" +"denseRankSimpleState" +"denseRankState" "dense_rank" "dense_rankArgMax" "dense_rankArgMin" @@ -1108,8 +1139,11 @@ "domainWithoutWWWRFC" "dotProduct" "dumpColumnStructure" +"dynamicElement" +"dynamicType" "e" "editDistance" +"editDistanceUTF8" "empty" "emptyArrayDate" "emptyArrayDateTime" @@ -1334,14 +1368,17 @@ "gccMurmurHash" "gcd" "generateRandomStructure" +"generateSnowflakeID" "generateULID" "generateUUIDv4" +"generateUUIDv7" "geoDistance" "geoToH3" "geoToS2" "geohashDecode" "geohashEncode" "geohashesInBox" +"getClientHTTPHeader" "getMacro" "getOSKernelVersion" "getServerPort" @@ -1589,6 +1626,20 @@ "groupBitmapXorSimpleState" "groupBitmapXorState" "groupConcat" +"groupConcatArgMax" +"groupConcatArgMin" +"groupConcatArray" +"groupConcatDistinct" +"groupConcatForEach" +"groupConcatIf" +"groupConcatMap" +"groupConcatMerge" +"groupConcatNull" +"groupConcatOrDefault" +"groupConcatOrNull" +"groupConcatResample" +"groupConcatSimpleState" +"groupConcatState" "groupUniqArray" "groupUniqArrayArgMax" "groupUniqArrayArgMin" @@ -1604,6 +1655,21 @@ "groupUniqArrayResample" "groupUniqArraySimpleState" "groupUniqArrayState" +"group_concat" +"group_concatArgMax" +"group_concatArgMin" +"group_concatArray" +"group_concatDistinct" +"group_concatForEach" +"group_concatIf" +"group_concatMap" +"group_concatMerge" +"group_concatNull" +"group_concatOrDefault" +"group_concatOrNull" +"group_concatResample" +"group_concatSimpleState" +"group_concatState" "h3CellAreaM2" "h3CellAreaRads2" "h3Distance" @@ -1660,6 +1726,8 @@ "hasTokenCaseInsensitiveOrNull" "hasTokenOrNull" "hex" +"hilbertDecode" +"hilbertEncode" "histogram" "histogramArgMax" "histogramArgMin" @@ -1881,6 +1949,7 @@ "less" "lessOrEquals" "levenshteinDistance" +"levenshteinDistanceUTF8" "lgamma" "like" "ln" @@ -2498,10 +2567,44 @@ "parseDateTimeInJodaSyntaxOrZero" "parseDateTimeOrNull" "parseDateTimeOrZero" +"parseReadableSize" +"parseReadableSizeOrNull" +"parseReadableSizeOrZero" "parseTimeDelta" +"partitionID" "partitionId" "path" "pathFull" +"percentRank" +"percentRankArgMax" +"percentRankArgMin" +"percentRankArray" +"percentRankDistinct" +"percentRankForEach" +"percentRankIf" +"percentRankMap" +"percentRankMerge" +"percentRankNull" +"percentRankOrDefault" +"percentRankOrNull" +"percentRankResample" 
+"percentRankSimpleState" +"percentRankState" +"percent_rank" +"percent_rankArgMax" +"percent_rankArgMin" +"percent_rankArray" +"percent_rankDistinct" +"percent_rankForEach" +"percent_rankIf" +"percent_rankMap" +"percent_rankMerge" +"percent_rankNull" +"percent_rankOrDefault" +"percent_rankOrNull" +"percent_rankResample" +"percent_rankSimpleState" +"percent_rankState" "pi" "plus" "pmod" @@ -2533,6 +2636,7 @@ "positive_modulo" "pow" "power" +"printf" "proportionsZTest" "protocol" "punycodeDecode" @@ -3103,6 +3207,8 @@ "rankResample" "rankSimpleState" "rankState" +"readWKTLineString" +"readWKTMultiLineString" "readWKTMultiPolygon" "readWKTPoint" "readWKTPolygon" @@ -3340,6 +3446,8 @@ "skewSampState" "sleep" "sleepEachRow" +"snowflakeIDToDateTime" +"snowflakeIDToDateTime64" "snowflakeToDateTime" "snowflakeToDateTime64" "soundex" @@ -3902,6 +4010,7 @@ "truncate" "tryBase58Decode" "tryBase64Decode" +"tryBase64URLDecode" "tryDecrypt" "tryIdnaEncode" "tryPunycodeDecode" @@ -3923,6 +4032,7 @@ "tupleModuloByNumber" "tupleMultiply" "tupleMultiplyByNumber" +"tupleNames" "tupleNegate" "tuplePlus" "tupleToNameValuePairs" diff --git a/tests/fuzz/dictionaries/key_words.dict b/tests/fuzz/dictionaries/keywords.dict similarity index 95% rename from tests/fuzz/dictionaries/key_words.dict rename to tests/fuzz/dictionaries/keywords.dict index db517a2382c..abaaf9e53b5 100644 --- a/tests/fuzz/dictionaries/key_words.dict +++ b/tests/fuzz/dictionaries/keywords.dict @@ -3,7 +3,7 @@ "ADD CONSTRAINT" "ADD INDEX" "ADD PROJECTION" -"ADD STATISTIC" +"ADD STATISTICS" "ADMIN OPTION FOR" "AFTER" "ALGORITHM" @@ -76,7 +76,7 @@ "CLEAR COLUMN" "CLEAR INDEX" "CLEAR PROJECTION" -"CLEAR STATISTIC" +"CLEAR STATISTICS" "CLUSTER" "CLUSTERS" "CN" @@ -110,6 +110,8 @@ "CURRENTUSER" "CURRENT_USER" "D" +"DATA" +"DATA INNER UUID" "DATABASE" "DATABASES" "DATE" @@ -147,7 +149,7 @@ "DROP PART" "DROP PARTITION" "DROP PROJECTION" -"DROP STATISTIC" +"DROP STATISTICS" "DROP TABLE" "DROP TEMPORARY TABLE" "ELSE" @@ -247,6 +249,7 @@ "IS NULL" "IS_OBJECT_ID" "JOIN" +"JWT" "KERBEROS" "KEY" "KEY BY" @@ -277,13 +280,15 @@ "MATERIALIZE COLUMN" "MATERIALIZE INDEX" "MATERIALIZE PROJECTION" -"MATERIALIZE STATISTIC" +"MATERIALIZE STATISTICS" "MATERIALIZE TTL" "MATERIALIZED" "MAX" "MCS" "MEMORY" "MERGES" +"METRICS" +"METRICS INNER UUID" "MI" "MICROSECOND" "MICROSECONDS" @@ -297,12 +302,14 @@ "MODIFY" "MODIFY COLUMN" "MODIFY COMMENT" +"MODIFY DEFINER" "MODIFY ORDER BY" "MODIFY QUERY" "MODIFY REFRESH" "MODIFY SAMPLE BY" "MODIFY SETTING" "MODIFY SQL SECURITY" +"MODIFY STATISTICS" "MODIFY TTL" "MONTH" "MONTHS" @@ -373,6 +380,7 @@ "Protobuf" "Q" "QQ" +"QUALIFY" "QUARTER" "QUARTERS" "QUERY" @@ -384,6 +392,7 @@ "READONLY" "REALM" "RECOMPRESS" +"RECURSIVE" "REFERENCES" "REFRESH" "REGEXP" @@ -415,6 +424,7 @@ "SALT" "SAMPLE" "SAMPLE BY" +"SAN" "SCHEME" "SECOND" "SECONDS" @@ -460,7 +470,8 @@ "SS" "SSH_KEY" "SSL_CERTIFICATE" -"STATISTIC" +"START TRANSACTION" +"STATISTICS" "STEP" "STORAGE" "STRICT" @@ -475,6 +486,8 @@ "TABLE" "TABLE OVERRIDE" "TABLES" +"TAGS" +"TAGS INNER UUID" "TEMPORARY" "TEMPORARY TABLE" "TEST" @@ -529,6 +542,7 @@ "WITH NAME" "WITH REPLACE OPTION" "WITH TIES" +"WITH_ITEMINDEX" "WK" "WRITABLE" "WW" @@ -540,4 +554,3 @@ "bagexpansion" "base_backup" "cluster_host_ids" -"with_itemindex" diff --git a/utils/libfuzzer/runner.py b/tests/fuzz/runner.py similarity index 75% rename from utils/libfuzzer/runner.py rename to tests/fuzz/runner.py index bbe648dbbc2..44259228f60 100644 --- a/utils/libfuzzer/runner.py +++ b/tests/fuzz/runner.py @@ -11,7 +11,7 @@ 
FUZZER_ARGS = os.getenv("FUZZER_ARGS", "") def run_fuzzer(fuzzer: str): - logging.info(f"Running fuzzer {fuzzer}...") + logging.info("Running fuzzer %s...", fuzzer) corpus_dir = f"{fuzzer}.in" with Path(corpus_dir) as path: @@ -29,28 +29,27 @@ def run_fuzzer(fuzzer: str): if parser.has_section("asan"): os.environ["ASAN_OPTIONS"] = ( - f"{os.environ['ASAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['asan'].items())}" + f"{os.environ['ASAN_OPTIONS']}:{':'.join(f'{key}={value}' for key, value in parser['asan'].items())}" ) if parser.has_section("msan"): os.environ["MSAN_OPTIONS"] = ( - f"{os.environ['MSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['msan'].items())}" + f"{os.environ['MSAN_OPTIONS']}:{':'.join(f'{key}={value}' for key, value in parser['msan'].items())}" ) if parser.has_section("ubsan"): os.environ["UBSAN_OPTIONS"] = ( - f"{os.environ['UBSAN_OPTIONS']}:{':'.join('%s=%s' % (key, value) for key, value in parser['ubsan'].items())}" + f"{os.environ['UBSAN_OPTIONS']}:{':'.join(f'{key}={value}' for key, value in parser['ubsan'].items())}" ) if parser.has_section("libfuzzer"): custom_libfuzzer_options = " ".join( - "-%s=%s" % (key, value) - for key, value in parser["libfuzzer"].items() + f"-{key}={value}" for key, value in parser["libfuzzer"].items() ) if parser.has_section("fuzzer_arguments"): fuzzer_arguments = " ".join( - ("%s" % key) if value == "" else ("%s=%s" % (key, value)) + (f"{key}") if value == "" else (f"{key}={value}") for key, value in parser["fuzzer_arguments"].items() ) @@ -65,7 +64,7 @@ def run_fuzzer(fuzzer: str): cmd_line += " < /dev/null" - logging.info(f"...will execute: {cmd_line}") + logging.info("...will execute: %s", cmd_line) subprocess.check_call(cmd_line, shell=True) diff --git a/tests/fuzz/tcp_protocol_fuzzer.options b/tests/fuzz/tcp_protocol_fuzzer.options new file mode 100644 index 00000000000..4885669d91d --- /dev/null +++ b/tests/fuzz/tcp_protocol_fuzzer.options @@ -0,0 +1,4 @@ +[fuzzer_arguments] +--log-file=tcp_protocol_fuzzer.log +--= +--logging.terminal=0 diff --git a/tests/fuzz/update_dict.sh b/tests/fuzz/update_dict.sh new file mode 100755 index 00000000000..a83c9167129 --- /dev/null +++ b/tests/fuzz/update_dict.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -euo pipefail + +SCRIPT_DIR=$(dirname "$(realpath "$0")") +ROOT_PATH="$(git rev-parse --show-toplevel)" +CLICKHOUSE_BIN="${CLICKHOUSE_BIN:-$ROOT_PATH/build/programs/clickhouse}" +DICTIONARIES_DIR="$SCRIPT_DIR/dictionaries" + +echo "Generating functions dict" +$CLICKHOUSE_BIN local -q "SELECT * FROM (SELECT DISTINCT concat('\"', name, '\"') as res FROM system.functions ORDER BY name UNION ALL SELECT concat('\"', a.name, b.name, '\"') as res FROM system.functions as a CROSS JOIN system.aggregate_function_combinators as b WHERE a.is_aggregate = 1) ORDER BY res" > "$DICTIONARIES_DIR/functions.dict" + +echo "Generating data types dict" +$CLICKHOUSE_BIN local -q "SELECT DISTINCT concat('\"', name, '\"') as res FROM system.data_type_families ORDER BY name" > "$DICTIONARIES_DIR/datatypes.dict" + +echo "Generating keywords dict" +$CLICKHOUSE_BIN local -q "SELECT DISTINCT concat('\"', keyword, '\"') as res FROM system.keywords ORDER BY keyword" > "$DICTIONARIES_DIR/keywords.dict" + +echo "Merging dictionaries into all.dict" +cat "$DICTIONARIES_DIR"/* | LC_ALL=C sort | uniq > "$SCRIPT_DIR/all.dict" \ No newline at end of file diff --git a/tests/integration/parallel_skip.json b/tests/integration/parallel_skip.json index 99fa626bd1e..fca2126d824 100644 --- 
a/tests/integration/parallel_skip.json +++ b/tests/integration/parallel_skip.json @@ -162,9 +162,13 @@ "test_storage_kafka/test.py::test_system_kafka_consumers_rebalance_mv", "test_storage_kafka/test.py::test_formats_errors", "test_storage_kafka/test.py::test_multiple_read_in_materialized_views", + "test_storage_kafka/test.py::test_kafka_null_message", + + "test_storage_kafka/test_produce_http_interface.py::test_kafka_produce_http_interface_row_based_format", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_request_new_ticket_after_expiration", "test_storage_kerberized_kafka/test.py::test_kafka_json_as_string_no_kdc", "test_storage_kerberized_kafka/test.py::test_kafka_config_from_sql_named_collection" + ] diff --git a/tests/queries/0_stateless/02864_statistics_exception.reference b/tests/integration/test_async_metrics_in_cgroup/__init__.py similarity index 100% rename from tests/queries/0_stateless/02864_statistics_exception.reference rename to tests/integration/test_async_metrics_in_cgroup/__init__.py diff --git a/tests/integration/test_async_metrics_in_cgroup/test.py b/tests/integration/test_async_metrics_in_cgroup/test.py new file mode 100644 index 00000000000..d9f2e3aaaed --- /dev/null +++ b/tests/integration/test_async_metrics_in_cgroup/test.py @@ -0,0 +1,69 @@ +import pytest + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node1 = cluster.add_instance("node1", stay_alive=True) +node2 = cluster.add_instance("node2", stay_alive=True) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + cluster.start() + yield cluster + finally: + cluster.shutdown() + + +def run_cpu_intensive_task(node): + node.query( + "SELECT sum(*) FROM system.numbers_mt FORMAT Null SETTINGS max_execution_time=10", + ignore_error=True, + ) + + +def get_async_metric(node, metric): + node.query("SYSTEM FLUSH LOGS") + return node.query( + f""" + SELECT max(value) + FROM ( + SELECT toStartOfInterval(event_time, toIntervalSecond(1)) AS t, avg(value) AS value + FROM system.asynchronous_metric_log + WHERE event_time >= now() - 60 AND metric = '{metric}' + GROUP BY t + ) + SETTINGS max_threads = 1 + """ + ).strip("\n") + + +def test_user_cpu_accounting(start_cluster): + if node1.is_built_with_sanitizer(): + pytest.skip("Disabled for sanitizers") + + # run query on the other node, its usage shouldn't be accounted by node1 + run_cpu_intensive_task(node2) + + node1_cpu_time = get_async_metric(node1, "OSUserTime") + assert float(node1_cpu_time) < 2 + + # then let's test that we will account cpu time spent by the server itself + node2_cpu_time = get_async_metric(node2, "OSUserTime") + # this check is really weak, but CI is tough place and we cannot guarantee that test process will get many cpu time + assert float(node2_cpu_time) > 2 + + +def test_normalized_user_cpu(start_cluster): + if node1.is_built_with_sanitizer(): + pytest.skip("Disabled for sanitizers") + + # run query on the other node, its usage shouldn't be accounted by node1 + run_cpu_intensive_task(node2) + + node1_cpu_time = get_async_metric(node1, "OSUserTimeNormalized") + assert float(node1_cpu_time) < 1.01 + + node2_cpu_time = get_async_metric(node2, "OSUserTimeNormalized") + assert float(node2_cpu_time) < 1.01 diff --git a/tests/integration/test_broken_projections/config.d/dont_start_broken.xml b/tests/integration/test_broken_projections/config.d/dont_start_broken.xml new file mode 100644 index 00000000000..9603cdc7e3e --- 
/dev/null +++ b/tests/integration/test_broken_projections/config.d/dont_start_broken.xml @@ -0,0 +1,6 @@ + + + + 0 + + diff --git a/tests/integration/test_broken_projections/test.py b/tests/integration/test_broken_projections/test.py index 162c0dbaa2f..578ff42369c 100644 --- a/tests/integration/test_broken_projections/test.py +++ b/tests/integration/test_broken_projections/test.py @@ -4,6 +4,7 @@ import logging import string import random from helpers.cluster import ClickHouseCluster +from multiprocessing.dummy import Pool cluster = ClickHouseCluster(__file__) @@ -18,6 +19,12 @@ def cluster(): stay_alive=True, with_zookeeper=True, ) + cluster.add_instance( + "node_restart", + main_configs=["config.d/dont_start_broken.xml"], + stay_alive=True, + with_zookeeper=True, + ) logging.info("Starting cluster...") cluster.start() @@ -632,6 +639,49 @@ def test_broken_on_start(cluster): check(node, table_name, 0) +def test_disappeared_projection_on_start(cluster): + node = cluster.instances["node_restart"] + + table_name = "test_disapperead_projection" + create_table(node, table_name, 1) + + node.query(f"SYSTEM STOP MERGES {table_name}") + + insert(node, table_name, 0, 5) + insert(node, table_name, 5, 5) + insert(node, table_name, 10, 5) + insert(node, table_name, 15, 5) + + assert ["all_0_0_0", "all_1_1_0", "all_2_2_0", "all_3_3_0"] == get_parts( + node, table_name + ) + + def drop_projection(): + node.query( + f"ALTER TABLE {table_name} DROP PROJECTION proj2", + settings={"mutations_sync": "0"}, + ) + + p = Pool(2) + p.apply_async(drop_projection) + + for i in range(30): + create_query = node.query(f"SHOW CREATE TABLE {table_name}") + if "proj2" not in create_query: + break + time.sleep(0.5) + + assert "proj2" not in create_query + + # Remove 'proj2' for part all_2_2_0 + break_projection(node, table_name, "proj2", "all_2_2_0", "part") + + node.restart_clickhouse() + + # proj2 is not broken, it doesn't exist, but ok + check(node, table_name, 0, expect_broken_part="proj2", do_check_command=0) + + def test_mutation_with_broken_projection(cluster): node = cluster.instances["node"] diff --git a/tests/integration/test_checking_s3_blobs_paranoid/test.py b/tests/integration/test_checking_s3_blobs_paranoid/test.py index 73f2888ce00..76a0f30f82e 100644 --- a/tests/integration/test_checking_s3_blobs_paranoid/test.py +++ b/tests/integration/test_checking_s3_blobs_paranoid/test.py @@ -708,7 +708,7 @@ def test_no_key_found_disk(cluster, broken_s3): """ SELECT value FROM system.metrics - WHERE metric = 'S3DiskNoKeyErrors' + WHERE metric = 'DiskS3NoSuchKeyErrors' """ ).strip() ) diff --git a/tests/integration/test_cluster_all_replicas/test.py b/tests/integration/test_cluster_all_replicas/test.py index d8bad180e1b..9797db7c498 100644 --- a/tests/integration/test_cluster_all_replicas/test.py +++ b/tests/integration/test_cluster_all_replicas/test.py @@ -21,14 +21,14 @@ def start_cluster(): def test_cluster(start_cluster): assert ( node1.query( - "SELECT hostName() FROM clusterAllReplicas('one_shard_two_nodes', system.one)" + "SELECT hostName() FROM clusterAllReplicas('one_shard_two_nodes', system.one) ORDER BY ALL" ) == "node1\nnode2\n" ) assert set( node1.query( - """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) ORDER BY dummy""" + """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) ORDER BY ALL""" ).splitlines() ) == {"node1\t0", "node2\t0"} @@ -48,7 +48,7 @@ def test_global_in(start_cluster): assert set( node1.query( - """SELECT hostName(), * FROM 
clusterAllReplicas("one_shard_two_nodes", system.one) where dummy GLOBAL IN u""" + """SELECT hostName(), * FROM clusterAllReplicas("one_shard_two_nodes", system.one) where dummy GLOBAL IN u ORDER BY ALL""" ).splitlines() ) == {"node1\t0", "node2\t0"} @@ -63,7 +63,7 @@ def test_global_in(start_cluster): def test_skip_unavailable_replica(start_cluster, cluster): assert ( node1.query( - f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) settings skip_unavailable_shards=1" + f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) ORDER BY ALL settings skip_unavailable_shards=1" ) == "node1\nnode2\n" ) @@ -81,5 +81,5 @@ def test_error_on_unavailable_replica(start_cluster, cluster): # so when skip_unavailable_shards=0 - any unavailable replica should lead to an error with pytest.raises(QueryRuntimeException): node1.query( - f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) settings skip_unavailable_shards=0" + f"SELECT hostName() FROM clusterAllReplicas('{cluster}', system.one) ORDER BY ALL settings skip_unavailable_shards=0" ) diff --git a/tests/integration/test_delayed_replica_failover/test.py b/tests/integration/test_delayed_replica_failover/test.py index a480ee3f278..f1034e26b25 100644 --- a/tests/integration/test_delayed_replica_failover/test.py +++ b/tests/integration/test_delayed_replica_failover/test.py @@ -20,21 +20,30 @@ node_1_2 = cluster.add_instance("node_1_2", with_zookeeper=True) node_2_1 = cluster.add_instance("node_2_1", with_zookeeper=True) node_2_2 = cluster.add_instance("node_2_2", with_zookeeper=True) +# For test to be runnable multiple times +seqno = 0 + @pytest.fixture(scope="module") def started_cluster(): try: cluster.start() + yield cluster + finally: + cluster.shutdown() + +@pytest.fixture(scope="function", autouse=True) +def create_tables(): + global seqno + try: + seqno += 1 for shard in (1, 2): for replica in (1, 2): node = cluster.instances["node_{}_{}".format(shard, replica)] node.query( - """ -CREATE TABLE replicated (d Date, x UInt32) ENGINE = - ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated', '{instance}') PARTITION BY toYYYYMM(d) ORDER BY d""".format( - shard=shard, instance=node.name - ) + f"CREATE TABLE replicated (d Date, x UInt32) ENGINE = " + f"ReplicatedMergeTree('/clickhouse/tables/{shard}/replicated_{seqno}', '{node.name}') PARTITION BY toYYYYMM(d) ORDER BY d" ) node_1_1.query( @@ -42,10 +51,15 @@ CREATE TABLE replicated (d Date, x UInt32) ENGINE = "Distributed('test_cluster', 'default', 'replicated')" ) - yield cluster + yield finally: - cluster.shutdown() + node_1_1.query("DROP TABLE distributed") + + node_1_1.query("DROP TABLE replicated") + node_1_2.query("DROP TABLE replicated") + node_2_1.query("DROP TABLE replicated") + node_2_2.query("DROP TABLE replicated") def test(started_cluster): @@ -101,7 +115,9 @@ SELECT sum(x) FROM distributed WITH TOTALS SETTINGS # allow pings to zookeeper to timeout (must be greater than ZK session timeout). 
for _ in range(30): try: - node_2_2.query("SELECT * FROM system.zookeeper where path = '/'") + node_2_2.query( + "SELECT * FROM system.zookeeper where path = '/' SETTINGS insert_keeper_max_retries = 0" + ) time.sleep(0.5) except: break @@ -120,7 +136,7 @@ SELECT sum(x) FROM distributed SETTINGS == "3" ) - # Regression for skip_unavailable_shards in conjunction with skip_unavailable_shards + # Prefer fallback_to_stale_replicas over skip_unavailable_shards assert ( instance_with_dist_table.query( """ diff --git a/tests/integration/test_distributed_type_object/configs/remote_servers.xml b/tests/integration/test_distributed_type_object/configs/remote_servers.xml index ebce4697529..68b420f36b4 100644 --- a/tests/integration/test_distributed_type_object/configs/remote_servers.xml +++ b/tests/integration/test_distributed_type_object/configs/remote_servers.xml @@ -1,4 +1,4 @@ - + @@ -15,4 +15,4 @@ - + diff --git a/tests/integration/test_drop_is_lock_free/test.py b/tests/integration/test_drop_is_lock_free/test.py index 1bb8767a9a0..3855bc21f90 100644 --- a/tests/integration/test_drop_is_lock_free/test.py +++ b/tests/integration/test_drop_is_lock_free/test.py @@ -176,7 +176,7 @@ def test_query_is_permanent(transaction, permanent, exclusive_table): select_handler = node.get_query_request( f""" - SELECT sleepEachRow(3) FROM {exclusive_table} SETTINGS function_sleep_max_microseconds_per_block = 0; + SELECT sleepEachRow(3) FROM {exclusive_table} SETTINGS function_sleep_max_microseconds_per_block = 0, max_threads=1; """, query_id=query_id, ) diff --git a/tests/integration/test_hedged_requests/test.py b/tests/integration/test_hedged_requests/test.py index 02ecf3c1367..0d72f7c45b1 100644 --- a/tests/integration/test_hedged_requests/test.py +++ b/tests/integration/test_hedged_requests/test.py @@ -333,7 +333,7 @@ def test_receive_timeout2(started_cluster): # in packet receiving but there are replicas in process of # connection establishing. 
update_configs( - node_1_sleep_in_send_data=4000, + node_1_sleep_in_send_data=5000, node_2_sleep_in_send_tables_status=2000, node_3_sleep_in_send_tables_status=2000, ) diff --git a/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml b/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml index 7a2141e6c7e..9329c8dbde2 100644 --- a/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml +++ b/tests/integration/test_host_regexp_hosts_file_resolution/configs/host_regexp.xml @@ -1,4 +1,4 @@ - + @@ -8,4 +8,4 @@ default - \ No newline at end of file + diff --git a/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml b/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml index 58ef55cd3f3..9c27c612f63 100644 --- a/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml +++ b/tests/integration/test_host_regexp_hosts_file_resolution/configs/listen_host.xml @@ -1,5 +1,5 @@ - + :: 0.0.0.0 1 - + diff --git a/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml b/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml index b5c351d105b..fb9acc58ad6 100644 --- a/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml +++ b/tests/integration/test_jbod_ha/configs/config.d/storage_configuration.xml @@ -1,4 +1,4 @@ - + 1000 @@ -27,4 +27,4 @@ - + diff --git a/tests/integration/test_postgresql_replica_database_engine_2/test.py b/tests/integration/test_postgresql_replica_database_engine_2/test.py index 406b50bc486..7fdd17625a9 100644 --- a/tests/integration/test_postgresql_replica_database_engine_2/test.py +++ b/tests/integration/test_postgresql_replica_database_engine_2/test.py @@ -953,12 +953,14 @@ def test_generated_columns(started_cluster): "", f"""CREATE TABLE {table} ( key integer PRIMARY KEY, - x integer, + x integer DEFAULT 0, + temp integer DEFAULT 0, y integer GENERATED ALWAYS AS (x*2) STORED, - z text); + z text DEFAULT 'z'); """, ) + pg_manager.execute(f"alter table {table} drop column temp;") pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") @@ -991,6 +993,44 @@ def test_generated_columns(started_cluster): ) +def test_generated_columns_with_sequence(started_cluster): + table = "test_generated_columns_with_sequence" + + pg_manager.create_postgres_table( + table, + "", + f"""CREATE TABLE {table} ( + key integer PRIMARY KEY, + x integer, + y integer GENERATED ALWAYS AS (x*2) STORED, + z text); + """, + ) + + pg_manager.execute( + f"create sequence {table}_id_seq increment by 1 minvalue 1 start 1;" + ) + pg_manager.execute( + f"alter table {table} alter key set default nextval('{table}_id_seq');" + ) + pg_manager.execute(f"insert into {table} (key, x, z) values (1,1,'1');") + pg_manager.execute(f"insert into {table} (key, x, z) values (2,2,'2');") + + pg_manager.create_materialized_db( + ip=started_cluster.postgres_ip, + port=started_cluster.postgres_port, + settings=[ + f"materialized_postgresql_tables_list = '{table}'", + "materialized_postgresql_backoff_min_ms = 100", + "materialized_postgresql_backoff_max_ms = 100", + ], + ) + + check_tables_are_synchronized( + instance, table, postgres_database=pg_manager.get_default_database() + ) + + def test_default_columns(started_cluster): table = "test_default_columns" @@ -1087,9 +1127,13 @@ def test_dependent_loading(started_cluster): nested_time 
= instance.query( f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{uuid}_nested' and message not like '%like%'" ).strip() - time = instance.query( - f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" - ).strip() + time = ( + instance.query( + f"SELECT event_time_microseconds FROM system.text_log WHERE message like 'Loading table default.{table}' and message not like '%like%'" + ) + .strip() + .split("\n")[-1] + ) instance.query( f"SELECT toDateTime64('{nested_time}', 6) < toDateTime64('{time}', 6)" ) diff --git a/tests/integration/test_server_keep_alive/__init__.py b/tests/integration/test_server_keep_alive/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml b/tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml new file mode 100644 index 00000000000..06e68044817 --- /dev/null +++ b/tests/integration/test_server_keep_alive/configs/keep_alive_settings.xml @@ -0,0 +1,4 @@ + + 3600 + 5 + diff --git a/tests/integration/test_server_keep_alive/test.py b/tests/integration/test_server_keep_alive/test.py new file mode 100644 index 00000000000..e550319b6df --- /dev/null +++ b/tests/integration/test_server_keep_alive/test.py @@ -0,0 +1,55 @@ +import logging +import pytest +import random +import requests + +from helpers.cluster import ClickHouseCluster + +cluster = ClickHouseCluster(__file__) +node = cluster.add_instance("node", main_configs=["configs/keep_alive_settings.xml"]) + + +@pytest.fixture(scope="module") +def start_cluster(): + try: + logging.info("Starting cluster...") + cluster.start() + logging.info("Cluster started") + + yield cluster + finally: + cluster.shutdown() + + +def test_max_keep_alive_requests_on_user_side(start_cluster): + # In this test we have `keep_alive_timeout` set to one hour to never trigger connection reset by timeout, `max_keep_alive_requests` is set to 5. + # We expect server to close connection after each 5 requests. We detect connection reset by change in src port. + # So the first 5 requests should come from the same port, the following 5 requests should come from another port. 
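+    # A rough sketch of configs/keep_alive_settings.xml for this node, assuming the XML elements
+    # are simply named after the two settings mentioned above:
+    #
+    #   <clickhouse>
+    #       <keep_alive_timeout>3600</keep_alive_timeout>
+    #       <max_keep_alive_requests>5</max_keep_alive_requests>
+    #   </clickhouse>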
+ + log_comments = [] + for _ in range(10): + rand_id = random.randint(0, 1000000) + log_comment = f"test_requests_with_keep_alive_{rand_id}" + log_comments.append(log_comment) + log_comments = sorted(log_comments) + + session = requests.Session() + for i in range(10): + session.get( + f"http://{node.ip_address}:8123/?query=select%201&log_comment={log_comments[i]}" + ) + + ports = node.query( + f""" + SYSTEM FLUSH LOGS; + + SELECT port + FROM system.query_log + WHERE log_comment IN ({", ".join(f"'{comment}'" for comment in log_comments)}) AND type = 'QueryFinish' + ORDER BY log_comment + """ + ).split("\n")[:-1] + + expected = 5 * [ports[0]] + [ports[5]] * 5 + + assert ports == expected diff --git a/tests/integration/test_server_reload/configs/default_passwd.xml b/tests/integration/test_server_reload/configs/default_passwd.xml index f79149e7e23..9d664cbf9c4 100644 --- a/tests/integration/test_server_reload/configs/default_passwd.xml +++ b/tests/integration/test_server_reload/configs/default_passwd.xml @@ -1,4 +1,4 @@ - + @@ -9,4 +9,4 @@ 123 - + diff --git a/tests/integration/test_server_reload/configs/overrides_from_zk.xml b/tests/integration/test_server_reload/configs/overrides_from_zk.xml index d420faa88a2..aa6105f6ebe 100644 --- a/tests/integration/test_server_reload/configs/overrides_from_zk.xml +++ b/tests/integration/test_server_reload/configs/overrides_from_zk.xml @@ -1,4 +1,4 @@ - + @@ -7,4 +7,4 @@ - + diff --git a/tests/integration/test_storage_azure_blob_storage/test.py b/tests/integration/test_storage_azure_blob_storage/test.py index 6fbe7634642..fbdc7f29f98 100644 --- a/tests/integration/test_storage_azure_blob_storage/test.py +++ b/tests/integration/test_storage_azure_blob_storage/test.py @@ -5,6 +5,7 @@ import json import logging import os import io +import re import random import threading import time @@ -134,6 +135,7 @@ def test_create_table_connection_string(cluster): Engine = AzureBlobStorage('{cluster.env_variables['AZURITE_CONNECTION_STRING']}', 'cont', 'test_create_connection_string', 'CSV') """, ) + azure_query(node, "DROP TABLE IF EXISTS test_create_table_conn_string") def test_create_table_account_string(cluster): @@ -143,6 +145,7 @@ def test_create_table_account_string(cluster): f"CREATE TABLE test_create_table_account_url (key UInt64, data String) Engine = AzureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," f"'cont', 'test_create_connection_string', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV')", ) + azure_query(node, "DROP TABLE IF EXISTS test_create_table_account_url") def test_simple_write_account_string(cluster): @@ -156,6 +159,7 @@ def test_simple_write_account_string(cluster): azure_query(node, "INSERT INTO test_simple_write VALUES (1, 'a')") print(get_azure_file_content("test_simple_write.csv", port)) assert get_azure_file_content("test_simple_write.csv", port) == '1,"a"\n' + azure_query(node, "DROP TABLE test_simple_write") def test_simple_write_connection_string(cluster): @@ -169,6 +173,7 @@ def test_simple_write_connection_string(cluster): azure_query(node, "INSERT INTO test_simple_write_connection_string VALUES (1, 'a')") print(get_azure_file_content("test_simple_write_c.csv", port)) assert get_azure_file_content("test_simple_write_c.csv", port) == '1,"a"\n' + azure_query(node, "DROP TABLE test_simple_write_connection_string") def test_simple_write_named_collection_1(cluster): @@ -184,7 +189,7 @@ def test_simple_write_named_collection_1(cluster): ) 
print(get_azure_file_content("test_simple_write_named.csv", port)) assert get_azure_file_content("test_simple_write_named.csv", port) == '1,"a"\n' - azure_query(node, "TRUNCATE TABLE test_simple_write_named_collection_1") + azure_query(node, "DROP TABLE test_simple_write_named_collection_1") def test_simple_write_named_collection_2(cluster): @@ -201,6 +206,7 @@ def test_simple_write_named_collection_2(cluster): ) print(get_azure_file_content("test_simple_write_named_2.csv", port)) assert get_azure_file_content("test_simple_write_named_2.csv", port) == '1,"a"\n' + azure_query(node, "DROP TABLE test_simple_write_named_collection_2") def test_partition_by(cluster): @@ -222,6 +228,7 @@ def test_partition_by(cluster): assert "1,2,3\n" == get_azure_file_content("test_3.csv", port) assert "3,2,1\n" == get_azure_file_content("test_1.csv", port) assert "78,43,45\n" == get_azure_file_content("test_45.csv", port) + azure_query(node, "DROP TABLE test_partitioned_write") def test_partition_by_string_column(cluster): @@ -242,6 +249,7 @@ def test_partition_by_string_column(cluster): assert '1,"foo/bar"\n' == get_azure_file_content("test_foo/bar.csv", port) assert '3,"йцук"\n' == get_azure_file_content("test_йцук.csv", port) assert '78,"你好"\n' == get_azure_file_content("test_你好.csv", port) + azure_query(node, "DROP TABLE test_partitioned_string_write") def test_partition_by_const_column(cluster): @@ -260,6 +268,7 @@ def test_partition_by_const_column(cluster): ) azure_query(node, f"INSERT INTO test_partitioned_const_write VALUES {values}") assert values_csv == get_azure_file_content("test_88.csv", port) + azure_query(node, "DROP TABLE test_partitioned_const_write") def test_truncate(cluster): @@ -275,6 +284,7 @@ def test_truncate(cluster): azure_query(node, "TRUNCATE TABLE test_truncate") with pytest.raises(Exception): print(get_azure_file_content("test_truncate.csv", port)) + azure_query(node, "DROP TABLE test_truncate") def test_simple_read_write(cluster): @@ -291,6 +301,7 @@ def test_simple_read_write(cluster): assert get_azure_file_content("test_simple_read_write.csv", port) == '1,"a"\n' print(azure_query(node, "SELECT * FROM test_simple_read_write")) assert azure_query(node, "SELECT * FROM test_simple_read_write") == "1\ta\n" + azure_query(node, "DROP TABLE test_simple_read_write") def test_create_new_files_on_insert(cluster): @@ -343,6 +354,7 @@ def test_overwrite(cluster): result = azure_query(node, f"select count() from test_overwrite") assert int(result) == 200 + azure_query(node, f"DROP TABLE test_overwrite") def test_insert_with_path_with_globs(cluster): @@ -355,6 +367,7 @@ def test_insert_with_path_with_globs(cluster): node.query_and_get_error( f"insert into table function test_insert_globs SELECT number, randomString(100) FROM numbers(500)" ) + azure_query(node, f"DROP TABLE test_insert_globs") def test_put_get_with_globs(cluster): @@ -363,6 +376,7 @@ def test_put_get_with_globs(cluster): node = cluster.instances["node"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" max_path = "" + used_names = [] for i in range(10): for j in range(10): path = "{}/{}_{}/{}.csv".format( @@ -371,6 +385,8 @@ def test_put_get_with_globs(cluster): max_path = max(path, max_path) values = f"({i},{j},{i + j})" + used_names.append(f"test_put_{i}_{j}") + azure_query( node, f"CREATE TABLE test_put_{i}_{j} ({table_format}) Engine = AzureBlobStorage(azure_conf2, " @@ -391,6 +407,9 @@ def test_put_get_with_globs(cluster): bucket="cont", max_path=max_path ) ] + azure_query(node, 
"DROP TABLE test_glob_select") + for name in used_names: + azure_query(node, f"DROP TABLE {name}") def test_azure_glob_scheherazade(cluster): @@ -399,12 +418,14 @@ def test_azure_glob_scheherazade(cluster): values = "(1, 1, 1)" nights_per_job = 1001 // 30 jobs = [] + used_names = [] for night in range(0, 1001, nights_per_job): def add_tales(start, end): for i in range(start, end): path = "night_{}/tale.csv".format(i) unique_num = random.randint(1, 10000) + used_names.append(f"test_scheherazade_{i}_{unique_num}") azure_query( node, f"CREATE TABLE test_scheherazade_{i}_{unique_num} ({table_format}) Engine = AzureBlobStorage(azure_conf2, " @@ -432,6 +453,9 @@ def test_azure_glob_scheherazade(cluster): ) query = "select count(), sum(column1), sum(column2), sum(column3) from test_glob_select_scheherazade" assert azure_query(node, query).splitlines() == ["1001\t1001\t1001\t1001"] + azure_query(node, "DROP TABLE test_glob_select_scheherazade") + for name in used_names: + azure_query(node, f"DROP TABLE {name}") @pytest.mark.parametrize( @@ -505,6 +529,8 @@ def test_schema_inference_no_globs(cluster): assert azure_query(node, query).splitlines() == [ "499500\t2890\t332833500\ttest_schema_inference_no_globs.csv\tcont/test_schema_inference_no_globs.csv" ] + azure_query(node, f"DROP TABLE test_schema_inference_src") + azure_query(node, f"DROP TABLE test_select_inference") def test_schema_inference_from_globs(cluster): @@ -513,6 +539,7 @@ def test_schema_inference_from_globs(cluster): node = cluster.instances["node"] # type: ClickHouseInstance table_format = "column1 UInt32, column2 UInt32, column3 UInt32" max_path = "" + used_names = [] for i in range(10): for j in range(10): path = "{}/{}_{}/{}.csv".format( @@ -520,6 +547,7 @@ def test_schema_inference_from_globs(cluster): ) max_path = max(path, max_path) values = f"({i},{j},{i + j})" + used_names.append(f"test_schema_{i}_{j}") azure_query( node, @@ -545,6 +573,9 @@ def test_schema_inference_from_globs(cluster): bucket="cont", max_path=max_path ) ] + azure_query(node, "DROP TABLE test_glob_select_inference") + for name in used_names: + azure_query(node, f"DROP TABLE {name}") def test_simple_write_account_string_table_function(cluster): @@ -594,7 +625,7 @@ def test_simple_write_named_collection_1_table_function(cluster): azure_query( node, - "TRUNCATE TABLE drop_table", + "DROP TABLE drop_table", ) @@ -605,6 +636,7 @@ def test_simple_write_named_collection_2_table_function(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," f" container='cont', blob_path='test_simple_write_named_2_tf.csv', format='CSV', structure='key UInt64, data String') VALUES (1, 'a')", + settings={"azure_truncate_on_insert": 1}, ) print(get_azure_file_content("test_simple_write_named_2_tf.csv", port)) assert get_azure_file_content("test_simple_write_named_2_tf.csv", port) == '1,"a"\n' @@ -628,6 +660,7 @@ def test_put_get_with_globs_tf(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) query = ( f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, " @@ -648,7 +681,7 @@ def test_schema_inference_no_globs_tf(cluster): query = ( f"insert into table function 
azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " f"container='cont', blob_path='test_schema_inference_no_globs_tf.csv', format='CSVWithNames', structure='{table_format}') " - f"SELECT number, toString(number), number * number FROM numbers(1000)" + f"SELECT number, toString(number), number * number FROM numbers(1000) SETTINGS azure_truncate_on_insert=1" ) azure_query(node, query) @@ -681,7 +714,7 @@ def test_schema_inference_from_globs_tf(cluster): f"insert into table function azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " f"container='cont', blob_path='{path}', format='CSVWithNames', structure='{table_format}') VALUES {values}" ) - azure_query(node, query) + azure_query(node, query, settings={"azure_truncate_on_insert": 1}) query = ( f"select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from azureBlobStorage(azure_conf2, " @@ -708,6 +741,7 @@ def test_partition_by_tf(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', " f"'cont', '{filename}', 'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', " f"'CSV', 'auto', '{table_format}') PARTITION BY {partition_by} VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) assert "1,2,3\n" == get_azure_file_content("test_partition_tf_3.csv", port) @@ -727,6 +761,7 @@ def test_filter_using_file(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', 'cont', '{filename}', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', " f"'{table_format}') PARTITION BY {partition_by} VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) query = ( @@ -744,7 +779,7 @@ def test_read_subcolumns(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_subcolumns.tsv', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto'," - f" 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3)", + f" 'a Tuple(b Tuple(c UInt32, d UInt32), e UInt32)') select ((1, 2), 3) SETTINGS azure_truncate_on_insert=1", ) azure_query( @@ -794,7 +829,7 @@ def test_read_subcolumn_time(cluster): node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_subcolumn_time.tsv', " f"'devstoreaccount1', 'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto'," - f" 'a UInt32') select (42)", + f" 'a UInt32') select (42) SETTINGS azure_truncate_on_insert=1", ) res = node.query( @@ -825,6 +860,7 @@ def test_function_signatures(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_signature.csv', '{account_name}', '{account_key}', 'CSV', 'auto', 'column1 UInt32') VALUES (1),(2),(3)", + settings={"azure_truncate_on_insert": 1}, ) # " - connection_string, container_name, blobpath\n" @@ -939,11 +975,13 @@ def test_union_schema_inference_mode(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference1.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'a UInt32') VALUES (1)", + settings={"azure_truncate_on_insert": 1}, ) 
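    # azure_truncate_on_insert is presumably enabled on these INSERTs so that repeated test runs
    # overwrite blobs left over from a previous run instead of failing on an already existing object.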
azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference2.jsonl', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'b UInt32') VALUES (2)", + settings={"azure_truncate_on_insert": 1}, ) node.query("system drop schema cache for azure") @@ -981,6 +1019,7 @@ def test_union_schema_inference_mode(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_union_schema_inference3.jsonl', '{account_name}', '{account_key}', 'CSV', 'auto', 's String') VALUES ('Error')", + settings={"azure_truncate_on_insert": 1}, ) error = azure_query( @@ -1002,7 +1041,7 @@ def test_schema_inference_cache(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cache0.jsonl', '{account_name}', '{account_key}') " - f"select * from numbers(100)", + f"select * from numbers(100) SETTINGS azure_truncate_on_insert=1", ) time.sleep(1) @@ -1209,19 +1248,19 @@ def test_filtering_by_file_or_path(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_filter1.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_filter2.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 2", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 2 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_filter3.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 3", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 3 SETTINGS azure_truncate_on_insert=1", ) node.query( @@ -1233,7 +1272,7 @@ def test_filtering_by_file_or_path(cluster): node.query("SYSTEM FLUSH LOGS") result = node.query( - f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query ilike '%select%azure%test_filter%' AND type='QueryFinish'" + f"SELECT ProfileEvents['EngineFileLikeReadFiles'] FROM system.query_log WHERE query ilike '%select%azure%test_filter%' AND type='QueryFinish' ORDER BY event_time_microseconds DESC LIMIT 1" ) assert int(result) == 1 @@ -1245,19 +1284,19 @@ def test_size_virtual_column(cluster): azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 'test_size_virtual_column1.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 1 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}','cont', 
'test_size_virtual_column2.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 11 SETTINGS azure_truncate_on_insert=1", ) azure_query( node, f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_size_virtual_column3.tsv', 'devstoreaccount1', " - f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111", + f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', 'auto', 'x UInt64') select 111 SETTINGS azure_truncate_on_insert=1", ) result = azure_query( @@ -1280,7 +1319,7 @@ def test_format_detection(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt64, y String') select number as x, 'str_' || toString(number) from numbers(0)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt64, y String') select number as x, 'str_' || toString(number) from numbers(0) SETTINGS azure_truncate_on_insert=1", ) azure_query( @@ -1350,7 +1389,7 @@ def test_write_to_globbed_partitioned_path(cluster): account_key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" error = azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_data_*_{{_partition_id}}', '{account_name}', '{account_key}', 'CSV', 'auto', 'x UInt64') partition by 42 select 42", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_data_*_{{_partition_id}}', '{account_name}', '{account_key}', 'CSV', 'auto', 'x UInt64') partition by 42 select 42 SETTINGS azure_truncate_on_insert=1", expect_error="true", ) @@ -1462,3 +1501,115 @@ def test_insert_create_new_file(cluster): assert TSV(res) == TSV( "test_create_new_file.csv\t1\ntest_create_new_file.1.csv\t2\n" ) + + +def test_hive_partitioning_with_one_parameter(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values = f"('Elizabeth', 'Gordon')" + path = "a/column1=Elizabeth/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values}", + settings={"azure_truncate_on_insert": 1}, + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}')" + ) + assert azure_query( + node, query, settings={"use_hive_partitioning": 1} + ).splitlines() == [ + "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth".format( + bucket="cont", max_path=path + ) + ] + + query = ( + 
f"SELECT column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + assert azure_query( + node, query, settings={"use_hive_partitioning": 1} + ).splitlines() == ["Gordon"] + + +def test_hive_partitioning_with_two_parameters(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values_1 = f"('Elizabeth', 'Gordon')" + values_2 = f"('Emilia', 'Gregor')" + path = "a/column1=Elizabeth/column2=Gordon/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + settings={"azure_truncate_on_insert": 1}, + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + assert azure_query( + node, query, settings={"use_hive_partitioning": 1} + ).splitlines() == [ + "Elizabeth\tGordon\tsample.csv\t{bucket}/{max_path}\tElizabeth\tGordon".format( + bucket="cont", max_path=path + ) + ] + + query = ( + f"SELECT column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2;" + ) + assert azure_query( + node, query, settings={"use_hive_partitioning": 1} + ).splitlines() == ["Elizabeth"] + + query = ( + f"SELECT column1 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column2=_column2 AND column1=_column1;" + ) + assert azure_query( + node, query, settings={"use_hive_partitioning": 1} + ).splitlines() == ["Elizabeth"] + + +def test_hive_partitioning_without_setting(cluster): + # type: (ClickHouseCluster) -> None + node = cluster.instances["node"] # type: ClickHouseInstance + table_format = "column1 String, column2 String" + values_1 = f"('Elizabeth', 'Gordon')" + values_2 = f"('Emilia', 'Gregor')" + path = "a/column1=Elizabeth/column2=Gordon/sample.csv" + + azure_query( + node, + f"INSERT INTO TABLE FUNCTION azureBlobStorage(azure_conf2, storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}'," + f" container='cont', blob_path='{path}', format='CSV', compression='auto', structure='{table_format}') VALUES {values_1}, {values_2}", + settings={"azure_truncate_on_insert": 1}, + ) + + query = ( + f"SELECT column1, column2, _file, _path, _column1, _column2 FROM azureBlobStorage(azure_conf2, " + f"storage_account_url = '{cluster.env_variables['AZURITE_STORAGE_ACCOUNT_URL']}', container='cont', " + f"blob_path='{path}', format='CSV', structure='{table_format}') WHERE column1=_column1;" + ) + pattern = re.compile( + r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL + ) + + with pytest.raises(Exception, match=pattern): + azure_query(node, query, 
settings={"use_hive_partitioning": 0}) diff --git a/tests/integration/test_storage_azure_blob_storage/test_cluster.py b/tests/integration/test_storage_azure_blob_storage/test_cluster.py index 6c5e2d20ca5..04baf007c69 100644 --- a/tests/integration/test_storage_azure_blob_storage/test_cluster.py +++ b/tests/integration/test_storage_azure_blob_storage/test_cluster.py @@ -72,6 +72,7 @@ def test_select_all(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_select_all.csv', 'devstoreaccount1'," f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', 'key UInt64, data String') " f"VALUES (1, 'a'), (2, 'b')", + settings={"azure_truncate_on_insert": 1}, ) print(get_azure_file_content("test_cluster_select_all.csv", port)) @@ -101,6 +102,7 @@ def test_count(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_count.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', " f"'auto', 'key UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) print(get_azure_file_content("test_cluster_count.csv", port)) @@ -129,6 +131,7 @@ def test_union_all(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_parquet_union_all', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'Parquet', " f"'auto', 'a Int32, b String') VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')", + settings={"azure_truncate_on_insert": 1}, ) pure_azure = azure_query( @@ -180,6 +183,7 @@ def test_skip_unavailable_shards(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_skip_unavailable.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " f"'auto', 'a UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) result = azure_query( node, @@ -200,6 +204,7 @@ def test_unset_skip_unavailable_shards(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_unset_skip_unavailable.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " f"'auto', 'a UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) result = azure_query( node, @@ -218,6 +223,7 @@ def test_cluster_with_named_collection(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_cluster_with_named_collection.csv', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'auto', " f"'auto', 'a UInt64') VALUES (1), (2)", + settings={"azure_truncate_on_insert": 1}, ) pure_azure = azure_query( @@ -249,6 +255,7 @@ def test_partition_parallel_reading_with_cluster(cluster): f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', '{filename}', 'devstoreaccount1', " f"'Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==', 'CSV', 'auto', '{table_format}') " f"PARTITION BY {partition_by} VALUES {values}", + settings={"azure_truncate_on_insert": 1}, ) assert "1,2,3\n" == get_azure_file_content("test_tf_3.csv", port) @@ -272,12 +279,12 @@ def test_format_detection(cluster): azure_query( node, - f"INSERT INTO TABLE FUNCTION 
azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection0', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10) SETTINGS azure_truncate_on_insert=1", ) azure_query( node, - f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection1', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10, 10)", + f"INSERT INTO TABLE FUNCTION azureBlobStorage('{storage_account_url}', 'cont', 'test_format_detection1', '{account_name}', '{account_key}', 'JSONEachRow', 'auto', 'x UInt32, y String') select number as x, 'str_' || toString(number) from numbers(10, 10) SETTINGS azure_truncate_on_insert=1", ) expected_desc_result = azure_query( diff --git a/tests/integration/test_storage_delta/test.py b/tests/integration/test_storage_delta/test.py index 054b79ff6fe..a595d01e6b3 100644 --- a/tests/integration/test_storage_delta/test.py +++ b/tests/integration/test_storage_delta/test.py @@ -464,7 +464,7 @@ def test_restart_broken(started_cluster): """ SELECT value FROM system.metrics - WHERE metric = 'S3DiskNoKeyErrors' + WHERE metric = 'DiskS3NoSuchKeyErrors' """ ).strip() ) diff --git a/tests/integration/test_storage_hdfs/test.py b/tests/integration/test_storage_hdfs/test.py index aef5ddb3675..fdbf7c5bacb 100644 --- a/tests/integration/test_storage_hdfs/test.py +++ b/tests/integration/test_storage_hdfs/test.py @@ -3,7 +3,9 @@ import os import pytest import uuid import time +import re from helpers.cluster import ClickHouseCluster, is_arm +from helpers.client import QueryRuntimeException from helpers.test_tools import TSV from pyhdfs import HdfsClient @@ -1255,6 +1257,55 @@ def test_respect_object_existence_on_partitioned_write(started_cluster): assert int(result) == 44 +def test_hive_partitioning_with_one_parameter(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data(f"/column0=Elizabeth/parquet_1", f"Elizabeth\tGordon\n") + assert hdfs_api.read_data(f"/column0=Elizabeth/parquet_1") == f"Elizabeth\tGordon\n" + + r = node1.query( + "SELECT _column0 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/parquet_1', 'TSV')", + settings={"use_hive_partitioning": 1}, + ) + assert r == f"Elizabeth\n" + + +def test_hive_partitioning_with_two_parameters(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") + == f"Elizabeth\tGordon\n" + ) + + r = node1.query( + "SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", + settings={"use_hive_partitioning": 1}, + ) + assert r == f"Gordon\n" + + +def test_hive_partitioning_without_setting(started_cluster): + hdfs_api = started_cluster.hdfs_api + hdfs_api.write_data( + f"/column0=Elizabeth/column1=Gordon/parquet_2", f"Elizabeth\tGordon\n" + ) + assert ( + hdfs_api.read_data(f"/column0=Elizabeth/column1=Gordon/parquet_2") + == f"Elizabeth\tGordon\n" + ) + pattern = re.compile( + r"DB::Exception: Unknown expression identifier '.*' in scope.*", re.DOTALL + ) + + 
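# With use_hive_partitioning=0 the path components (column0=..., column1=...) are not exposed as the virtual columns _column0/_column1, so the query below is expected to fail with an unknown-identifier error. +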
with pytest.raises(QueryRuntimeException, match=pattern): + node1.query( + f"SELECT _column1 FROM hdfs('hdfs://hdfs1:9000/column0=Elizabeth/column1=Gordon/parquet_2', 'TSV');", + settings={"use_hive_partitioning": 0}, + ) + + if __name__ == "__main__": cluster.start() input("Cluster created, press any key to destroy...") diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp new file mode 100644 index 00000000000..4f3eabe22f0 --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.capnp @@ -0,0 +1,6 @@ +@0x99f75f775fe63dae; + +struct StringKeyValuePair { + key@0 : Text; + value@1 : Text; +} diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format new file mode 100644 index 00000000000..83dff6ce401 --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.format @@ -0,0 +1 @@ +(key = ${key:CSV}, value = ${value:CSV}) diff --git a/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto new file mode 100644 index 00000000000..71905c63bdf --- /dev/null +++ b/tests/integration/test_storage_kafka/clickhouse_path/format_schemas/string_key_value.proto @@ -0,0 +1,6 @@ +syntax = "proto3"; + +message StringKeyValuePair { + string key = 1; + string value = 2; +} diff --git a/tests/integration/test_storage_kafka/test.py b/tests/integration/test_storage_kafka/test.py index 4b6c9922d74..bef90e1b9d3 100644 --- a/tests/integration/test_storage_kafka/test.py +++ b/tests/integration/test_storage_kafka/test.py @@ -1019,7 +1019,7 @@ def test_kafka_formats(kafka_cluster, create_query_generator): DROP TABLE IF EXISTS test.kafka_{format_name}_mv; - CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS + CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv ENGINE=MergeTree ORDER BY tuple() AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}; """.format( topic_name=topic_name, @@ -2460,7 +2460,7 @@ def test_kafka_commit_on_block_write(kafka_cluster, create_query_generator): (generate_old_create_table_query, "kafka.*Committed offset 2.*virt2_[01]"), ( generate_new_create_table_query, - r"kafka.*Saved offset 2[0-9]* for topic-partition \[virt2_[01]:[0-9]+", + r"kafka.*Saved offset 2 for topic-partition \[virt2_[01]:[0-9]+", ), ], ) @@ -2494,7 +2494,7 @@ def test_kafka_virtual_columns2(kafka_cluster, create_query_generator, log_line) f""" {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY tuple() AS SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka; """ ) @@ -2729,7 +2729,7 @@ def test_kafka_produce_key_timestamp(kafka_cluster, create_query_generator, log_ DROP TABLE IF EXISTS test.consumer; {writer_create_query}; {reader_create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY tuple() AS SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM 
test.kafka; """ ) @@ -2865,7 +2865,7 @@ def test_kafka_produce_consume_avro(kafka_cluster, create_query_generator): {writer_create_query}; {reader_create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY tuple() AS SELECT key, value FROM test.kafka; """ ) @@ -3537,7 +3537,7 @@ def test_bad_reschedule(kafka_cluster, create_query_generator): f""" {create_query}; - CREATE MATERIALIZED VIEW test.destination Engine=Log AS + CREATE MATERIALIZED VIEW test.destination ENGINE=MergeTree ORDER BY tuple() AS SELECT key, now() as consume_ts, @@ -3745,7 +3745,7 @@ def test_kafka_unavailable(kafka_cluster, create_query_generator, do_direct_read f""" {create_query}; - CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS + CREATE MATERIALIZED VIEW test.destination_unavailable ENGINE=MergeTree ORDER BY tuple() AS SELECT key, now() as consume_ts, @@ -4267,12 +4267,12 @@ def test_kafka_formats_with_broken_message(kafka_cluster, create_query_generator {create_query}; DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv; - CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS + CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv ENGINE=MergeTree ORDER BY tuple() AS SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name} WHERE length(_error) = 0; DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv; - CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS + CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv ENGINE=MergeTree ORDER BY tuple() AS SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name} WHERE length(_error) > 0; """ @@ -4796,7 +4796,7 @@ def test_max_rows_per_message(kafka_cluster, create_query_generator): DROP TABLE IF EXISTS test.kafka; {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.kafka; """ ) @@ -4875,7 +4875,7 @@ def test_row_based_formats(kafka_cluster, create_query_generator): {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.{table_name}; INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}); @@ -4982,7 +4982,7 @@ def test_block_based_formats_2(kafka_cluster, create_query_generator): {create_query}; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.{table_name}; INSERT INTO test.{table_name} SELECT number * 10 as key, number * 100 as value FROM numbers({num_rows}) settings max_block_size=12, optimize_trivial_insert_select=0; @@ -5362,7 +5362,7 @@ def test_formats_errors(kafka_cluster): input_format_with_names_use_header=0, format_schema='key_value_message:Message'; - CREATE MATERIALIZED VIEW test.view Engine=Log AS + CREATE MATERIALIZED VIEW test.view ENGINE=MergeTree ORDER BY (key, value) AS SELECT key, value FROM test.{table_name}; """ ) diff --git a/tests/integration/test_storage_kafka/test_produce_http_interface.py b/tests/integration/test_storage_kafka/test_produce_http_interface.py new file mode 100644 index 00000000000..fc10a07f239 --- /dev/null +++ b/tests/integration/test_storage_kafka/test_produce_http_interface.py @@ -0,0 +1,243 @@ +import 
time +import logging + +import pytest +from helpers.cluster import ClickHouseCluster, is_arm +from helpers.test_tools import TSV +from kafka import KafkaAdminClient +from kafka.admin import NewTopic + +if is_arm(): + pytestmark = pytest.mark.skip + +cluster = ClickHouseCluster(__file__) +instance = cluster.add_instance( + "instance", + main_configs=["configs/kafka.xml", "configs/named_collection.xml"], + user_configs=["configs/users.xml"], + with_kafka=True, + with_zookeeper=True, # For Replicated Table + macros={ + "kafka_broker": "kafka1", + "kafka_topic_old": "old", + "kafka_group_name_old": "old", + "kafka_topic_new": "new", + "kafka_group_name_new": "new", + "kafka_client_id": "instance", + "kafka_format_json_each_row": "JSONEachRow", + }, + clickhouse_path_dir="clickhouse_path", +) + + +@pytest.fixture(scope="module") +def kafka_cluster(): + try: + cluster.start() + kafka_id = instance.cluster.kafka_docker_id + print(("kafka_id is {}".format(kafka_id))) + yield cluster + finally: + cluster.shutdown() + + +@pytest.fixture(autouse=True) +def kafka_setup_teardown(): + instance.query("DROP DATABASE IF EXISTS test; CREATE DATABASE test;") + # logging.debug("kafka is available - running test") + yield # run test + + +def kafka_create_topic( + admin_client, + topic_name, + num_partitions=1, + replication_factor=1, + max_retries=50, + config=None, +): + logging.debug( + f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}" + ) + topics_list = [ + NewTopic( + name=topic_name, + num_partitions=num_partitions, + replication_factor=replication_factor, + topic_configs=config, + ) + ] + retries = 0 + while True: + try: + admin_client.create_topics(new_topics=topics_list, validate_only=False) + logging.debug("Admin client succeed") + return + except Exception as e: + retries += 1 + time.sleep(0.5) + if retries < max_retries: + logging.warning(f"Failed to create topic {e}") + else: + raise + + +def kafka_delete_topic(admin_client, topic, max_retries=50): + result = admin_client.delete_topics([topic]) + for topic, e in result.topic_error_codes: + if e == 0: + logging.debug(f"Topic {topic} deleted") + else: + logging.error(f"Failed to delete topic {topic}: {e}") + + retries = 0 + while True: + topics_listed = admin_client.list_topics() + logging.debug(f"TOPICS LISTED: {topics_listed}") + if topic not in topics_listed: + return + else: + retries += 1 + time.sleep(0.5) + if retries > max_retries: + raise Exception(f"Failed to delete topics {topic}, {result}") + + +def test_kafka_produce_http_interface_row_based_format(kafka_cluster): + # reproduction of #61060 with validating the written messages + admin_client = KafkaAdminClient( + bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port) + ) + + topic_prefix = "http_row_" + + # It is important to have: + # - long enough messages + # - enough messages + # I don't know the exact requirement for message sizes, but it doesn't reproduce with short messages + # For the number of messages it seems like at least 3 messages is necessary + expected_key = "01234567890123456789" + expected_value = "aaaaabbbbbccccc" + + insert_query_end = f"(key, value) VALUES ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}'), ('{expected_key}', '{expected_value}')" + insert_query_template = "INSERT INTO {table_name} " + insert_query_end + + extra_settings = { + "Protobuf": ", kafka_schema = 'string_key_value.proto:StringKeyValuePair'", + "CapnProto": ", 
kafka_schema='string_key_value:StringKeyValuePair'", + "Template": ", format_template_row='string_key_value.format'", + } + + # Only the formats that can be used both and input and output format are tested + # Reasons to exclude following formats: + # - JSONStrings: not actually an input format + # - ProtobufSingle: I cannot make it work to parse the messages. Probably something is broken, + # because the producer can write multiple rows into a same message, which makes them impossible to parse properly. Should added after #67549 is fixed. + # - ProtobufList: I didn't want to deal with the envelope and stuff + # - Npy: supports only single column + # - LineAsString: supports only single column + # - RawBLOB: supports only single column + formats_to_test = [ + "TabSeparated", + "TabSeparatedRaw", + "TabSeparatedWithNames", + "TabSeparatedWithNamesAndTypes", + "TabSeparatedRawWithNames", + "TabSeparatedRawWithNamesAndTypes", + "Template", + "CSV", + "CSVWithNames", + "CSVWithNamesAndTypes", + "CustomSeparated", + "CustomSeparatedWithNames", + "CustomSeparatedWithNamesAndTypes", + "Values", + "JSON", + "JSONColumns", + "JSONColumnsWithMetadata", + "JSONCompact", + "JSONCompactColumns", + "JSONEachRow", + "JSONStringsEachRow", + "JSONCompactEachRow", + "JSONCompactEachRowWithNames", + "JSONCompactEachRowWithNamesAndTypes", + "JSONCompactStringsEachRow", + "JSONCompactStringsEachRowWithNames", + "JSONCompactStringsEachRowWithNamesAndTypes", + "JSONObjectEachRow", + "BSONEachRow", + "TSKV", + "Protobuf", + "Avro", + "Parquet", + "Arrow", + "ArrowStream", + "ORC", + "RowBinary", + "RowBinaryWithNames", + "RowBinaryWithNamesAndTypes", + "Native", + "CapnProto", + "MsgPack", + ] + for format in formats_to_test: + logging.debug(f"Creating tables and writing messages to {format}") + topic = topic_prefix + format + kafka_create_topic(admin_client, topic) + + extra_setting = extra_settings.get(format, "") + + # kafka_max_rows_per_message is set to 2 to make sure every format produces at least 2 messages, thus increasing the chance of catching a bug + instance.query( + f""" + DROP TABLE IF EXISTS test.view_{topic}; + DROP TABLE IF EXISTS test.consumer_{topic}; + CREATE TABLE test.kafka_writer_{topic} (key String, value String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic}', + kafka_group_name = '{topic}', + kafka_format = '{format}', + kafka_max_rows_per_message = 2 {extra_setting}; + + CREATE TABLE test.kafka_{topic} (key String, value String) + ENGINE = Kafka + SETTINGS kafka_broker_list = 'kafka1:19092', + kafka_topic_list = '{topic}', + kafka_group_name = '{topic}', + kafka_format = '{format}' {extra_setting}; + + CREATE MATERIALIZED VIEW test.view_{topic} Engine=Log AS + SELECT key, value FROM test.kafka_{topic}; + """ + ) + instance.http_query( + insert_query_template.format(table_name="test.kafka_writer_" + topic), + method="POST", + ) + + expected = f"""\ +{expected_key}\t{expected_value} +{expected_key}\t{expected_value} +{expected_key}\t{expected_value} +""" + # give some times for the readers to read the messages + for format in formats_to_test: + logging.debug(f"Checking result for {format}") + topic = topic_prefix + format + + result = instance.query_with_retry( + f"SELECT * FROM test.view_{topic}", + check_callback=lambda res: res.count("\n") == 3, + ) + + assert TSV(result) == TSV(expected) + + kafka_delete_topic(admin_client, topic) + + +if __name__ == "__main__": + cluster.start() + input("Cluster created, press any key to destroy...") + 
cluster.shutdown() diff --git a/tests/integration/test_storage_s3_queue/test.py b/tests/integration/test_storage_s3_queue/test.py index 92d6f181464..8f197e09e61 100644 --- a/tests/integration/test_storage_s3_queue/test.py +++ b/tests/integration/test_storage_s3_queue/test.py @@ -771,7 +771,11 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): table_name, mode, files_path, - additional_settings={"keeper_path": keeper_path, "s3queue_buckets": 2}, + additional_settings={ + "keeper_path": keeper_path, + "s3queue_buckets": 2, + **({"s3queue_processing_threads_num": 1} if mode == "ordered" else {}), + }, ) for instance in [node, node_2]: @@ -806,6 +810,10 @@ def test_multiple_tables_streaming_sync_distributed(started_cluster, mode): list(map(int, l.split())) for l in run_query(node_2, get_query).splitlines() ] + logging.debug( + f"res1 size: {len(res1)}, res2 size: {len(res2)}, total_rows: {total_rows}" + ) + assert len(res1) + len(res2) == total_rows # Checking that all engines have made progress diff --git a/tests/integration/test_system_flush_logs/test.py b/tests/integration/test_system_flush_logs/test.py index 713b327eb76..cfecea5b3d6 100644 --- a/tests/integration/test_system_flush_logs/test.py +++ b/tests/integration/test_system_flush_logs/test.py @@ -4,7 +4,7 @@ import pytest from helpers.cluster import ClickHouseCluster -from helpers.test_tools import assert_eq_with_retry +from helpers.test_tools import assert_eq_with_retry, assert_logs_contain_with_retry, TSV cluster = ClickHouseCluster(__file__) node = cluster.add_instance( @@ -12,17 +12,6 @@ node = cluster.add_instance( stay_alive=True, ) -system_logs = [ - # enabled by default - ("system.text_log", 1), - ("system.query_log", 1), - ("system.query_thread_log", 1), - ("system.part_log", 1), - ("system.trace_log", 1), - ("system.metric_log", 1), - ("system.error_log", 1), -] - @pytest.fixture(scope="module", autouse=True) def start_cluster(): @@ -33,22 +22,28 @@ def start_cluster(): cluster.shutdown() -@pytest.fixture(scope="function") -def flush_logs(): +def test_system_logs_exists(): + system_logs = [ + ("system.text_log", 1), + ("system.query_log", 1), + ("system.query_thread_log", 1), + ("system.part_log", 1), + ("system.trace_log", 1), + ("system.metric_log", 1), + ("system.error_log", 1), + ] + node.query("SYSTEM FLUSH LOGS") - - -@pytest.mark.parametrize("table,exists", system_logs) -def test_system_logs(flush_logs, table, exists): - q = "SELECT * FROM {}".format(table) - if exists: - node.query(q) - else: - response = node.query_and_get_error(q) - assert ( - "Table {} does not exist".format(table) in response - or "Unknown table expression identifier '{}'".format(table) in response - ) + for table, exists in system_logs: + q = "SELECT * FROM {}".format(table) + if exists: + node.query(q) + else: + response = node.query_and_get_error(q) + assert ( + "Table {} does not exist".format(table) in response + or "Unknown table expression identifier '{}'".format(table) in response + ) # Logic is tricky, let's check that there is no hang in case of message queue @@ -67,14 +62,19 @@ def test_system_logs_non_empty_queue(): def test_system_suspend(): - node.query("CREATE TABLE t (x DateTime) ENGINE=Memory;") - node.query("INSERT INTO t VALUES (now());") - node.query("SYSTEM SUSPEND FOR 1 SECOND;") - node.query("INSERT INTO t VALUES (now());") - assert "1\n" == node.query("SELECT max(x) - min(x) >= 1 FROM t;") + try: + node.query("CREATE TABLE t (x DateTime) ENGINE=Memory;") + node.query("INSERT INTO t VALUES 
(now());") + node.query("SYSTEM SUSPEND FOR 1 SECOND;") + node.query("INSERT INTO t VALUES (now());") + assert "1\n" == node.query("SELECT max(x) - min(x) >= 1 FROM t;") + finally: + node.query("DROP TABLE IF EXISTS t;") def test_log_max_size(start_cluster): + # we do misconfiguration here: buffer_size_rows_flush_threshold > max_size_rows, flush_interval_milliseconds is huge + # no auto flush by size not by time has a chance node.exec_in_container( [ "bash", @@ -83,6 +83,7 @@ def test_log_max_size(start_cluster): 1000000 + 1000000 10 10 @@ -91,11 +92,24 @@ def test_log_max_size(start_cluster): """, ] ) - node.restart_clickhouse() - for i in range(10): - node.query(f"select {i}") - assert node.query("select count() >= 10 from system.query_log") == "1\n" + node.query("SYSTEM FLUSH LOGS") + node.query(f"TRUNCATE TABLE IF EXISTS system.query_log") + node.restart_clickhouse() + + # all logs records above max_size_rows are lost + # The accepted logs records are never flushed until system flush logs is called by us + for i in range(21): + node.query(f"select {i}") + node.query("system flush logs") + + assert_logs_contain_with_retry( + node, "Queue had been full at 0, accepted 10 logs, ignored 34 logs." + ) + assert node.query( + "select count() >= 10, count() < 20 from system.query_log" + ) == TSV([[1, 1]]) + node.exec_in_container( ["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"] ) diff --git a/tests/integration/test_system_logs_recreate/test.py b/tests/integration/test_system_logs_recreate/test.py index 1bdb1fe3261..8b84734ed02 100644 --- a/tests/integration/test_system_logs_recreate/test.py +++ b/tests/integration/test_system_logs_recreate/test.py @@ -33,124 +33,139 @@ def test_system_logs_recreate(): "error_log", ] - node.query("SYSTEM FLUSH LOGS") - for table in system_logs: - assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") - assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + try: + node.query("SYSTEM FLUSH LOGS") + for table in system_logs: + assert "ENGINE = MergeTree" in node.query( + f"SHOW CREATE TABLE system.{table}" ) - == 1 - ) - - # NOTE: we use zzz- prefix to make it the last file, - # so that it will be applied last. 
- for table in system_logs: - node.exec_in_container( - [ - "bash", - "-c", - f"""echo " - - <{table}> - ENGINE = Null - - - - " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml - """, - ] - ) - - node.restart_clickhouse() - node.query("SYSTEM FLUSH LOGS") - for table in system_logs: - assert "ENGINE = MergeTree" not in node.query( - f"SHOW CREATE TABLE system.{table}" - ) - assert "ENGINE = Null" in node.query(f"SHOW CREATE TABLE system.{table}") - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + assert "ENGINE = Null" not in node.query( + f"SHOW CREATE TABLE system.{table}" ) - == 2 - ) - - # apply only storage_policy for all system tables - for table in system_logs: - node.exec_in_container( - [ - "bash", - "-c", - f"""echo " - - <{table}> - system_tables - - - " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml - """, - ] - ) - node.restart_clickhouse() - node.query("SYSTEM FLUSH LOGS") - import logging - - for table in system_logs: - create_table_sql = node.query(f"SHOW CREATE TABLE system.{table} FORMAT TSVRaw") - logging.debug( - "With storage policy, SHOW CREATE TABLE system.%s is: %s", - table, - create_table_sql, - ) - assert "ENGINE = MergeTree" in create_table_sql - assert "ENGINE = Null" not in create_table_sql - assert "SETTINGS storage_policy = 'system_tables'" in create_table_sql - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 1 ) - == 3 - ) - for table in system_logs: - node.exec_in_container( - ["rm", f"/etc/clickhouse-server/config.d/zzz-override-{table}.xml"] - ) - - node.restart_clickhouse() - node.query("SYSTEM FLUSH LOGS") - for table in system_logs: - assert "ENGINE = MergeTree" in node.query(f"SHOW CREATE TABLE system.{table}") - assert "ENGINE = Null" not in node.query(f"SHOW CREATE TABLE system.{table}") - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + # NOTE: we use zzz- prefix to make it the last file, + # so that it will be applied last. + for table in system_logs: + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " + + <{table}> + ENGINE = Null + + + + " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml + """, + ] ) - == 4 - ) - node.query("SYSTEM FLUSH LOGS") - # Ensure that there was no superfluous RENAME's - # IOW that the table created only when the structure is indeed different. 
- for table in system_logs: - assert ( - len( - node.query(f"SHOW TABLES FROM system LIKE '{table}%'") - .strip() - .split("\n") + node.restart_clickhouse() + node.query("SYSTEM FLUSH LOGS") + for table in system_logs: + assert "ENGINE = MergeTree" not in node.query( + f"SHOW CREATE TABLE system.{table}" ) - == 4 - ) + assert "ENGINE = Null" in node.query(f"SHOW CREATE TABLE system.{table}") + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 2 + ) + + # apply only storage_policy for all system tables + for table in system_logs: + node.exec_in_container( + [ + "bash", + "-c", + f"""echo " + + <{table}> + system_tables + + + " > /etc/clickhouse-server/config.d/zzz-override-{table}.xml + """, + ] + ) + node.restart_clickhouse() + node.query("SYSTEM FLUSH LOGS") + import logging + + for table in system_logs: + create_table_sql = node.query( + f"SHOW CREATE TABLE system.{table} FORMAT TSVRaw" + ) + logging.debug( + "With storage policy, SHOW CREATE TABLE system.%s is: %s", + table, + create_table_sql, + ) + assert "ENGINE = MergeTree" in create_table_sql + assert "ENGINE = Null" not in create_table_sql + assert "SETTINGS storage_policy = 'system_tables'" in create_table_sql + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 3 + ) + + for table in system_logs: + node.exec_in_container( + ["rm", f"/etc/clickhouse-server/config.d/zzz-override-{table}.xml"] + ) + + node.restart_clickhouse() + node.query("SYSTEM FLUSH LOGS") + for table in system_logs: + assert "ENGINE = MergeTree" in node.query( + f"SHOW CREATE TABLE system.{table}" + ) + assert "ENGINE = Null" not in node.query( + f"SHOW CREATE TABLE system.{table}" + ) + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 4 + ) + + node.query("SYSTEM FLUSH LOGS") + # Ensure that there was no superfluous RENAME's + # IOW that the table created only when the structure is indeed different. 
+ for table in system_logs: + assert ( + len( + node.query(f"SHOW TABLES FROM system LIKE '{table}%'") + .strip() + .split("\n") + ) + == 4 + ) + finally: + for table in system_logs: + for syffix in range(3): + node.query(f"DROP TABLE IF EXISTS system.{table}_{syffix} sync") def test_drop_system_log(): @@ -173,11 +188,20 @@ def test_drop_system_log(): node.query("system flush logs") node.query("select 2") node.query("system flush logs") - assert node.query("select count() > 0 from system.query_log") == "1\n" + assert node.query("select count() >= 2 from system.query_log") == "1\n" + node.query("drop table system.query_log sync") node.query("select 3") node.query("system flush logs") - assert node.query("select count() > 0 from system.query_log") == "1\n" + assert node.query("select count() >= 1 from system.query_log") == "1\n" + + node.query("drop table system.query_log sync") + node.restart_clickhouse() + node.query("system flush logs") + assert ( + node.query("select count() >= 0 from system.query_log") == "1\n" + ) # we check that query_log just exists + node.exec_in_container( ["rm", f"/etc/clickhouse-server/config.d/yyy-override-query_log.xml"] ) diff --git a/tests/integration/test_throttling/test.py b/tests/integration/test_throttling/test.py index c53c2bb1ddf..4bd96e2756d 100644 --- a/tests/integration/test_throttling/test.py +++ b/tests/integration/test_throttling/test.py @@ -121,21 +121,15 @@ def node_update_config(mode, setting, value=None): node.restart_clickhouse() -def assert_took(took, should_took): +def assert_took(took, should_take): # we need to decrease the lower limit because the server limits could # be enforced by throttling some server background IO instead of query IO # and we have no control over it - # - # and the same for upper limit, it can be slightly larger, due to for - # instance network latencies or CPU starvation - if should_took > 0: - assert took >= should_took * 0.85 and took <= should_took * 1.8 - else: - assert took >= should_took * 0.85 + assert took >= should_take * 0.85 @pytest.mark.parametrize( - "policy,backup_name,mode,setting,value,should_took", + "policy,backup_name,mode,setting,value,should_take", [ # # Local -> Local @@ -149,7 +143,7 @@ def assert_took(took, should_took): 0, id="no_local_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", next_backup_name("local"), @@ -159,7 +153,7 @@ def assert_took(took, should_took): 7, id="user_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", next_backup_name("local"), @@ -181,7 +175,7 @@ def assert_took(took, should_took): 0, id="no_remote_to_local_throttling", ), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", next_backup_name("local"), @@ -191,7 +185,7 @@ def assert_took(took, should_took): 7, id="user_remote_to_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", next_backup_name("local"), @@ -252,7 +246,7 @@ def assert_took(took, should_took): 0, id="no_local_to_remote_throttling", ), - # reading 1e6*8 bytes with 1M default 
bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", next_backup_name("remote"), @@ -262,7 +256,7 @@ def assert_took(took, should_took): 7, id="user_local_to_remote_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", next_backup_name("remote"), @@ -274,7 +268,7 @@ def assert_took(took, should_took): ), ], ) -def test_backup_throttling(policy, backup_name, mode, setting, value, should_took): +def test_backup_throttling(policy, backup_name, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -284,7 +278,7 @@ def test_backup_throttling(policy, backup_name, mode, setting, value, should_too """ ) _, took = elapsed(node.query, f"backup table data to {backup_name}") - assert_took(took, should_took) + assert_took(took, should_take) def test_backup_throttling_override(): @@ -305,18 +299,18 @@ def test_backup_throttling_override(): "max_backup_bandwidth": "500K", }, ) - # reading 1e6*8 bytes with 500Ki default bandwith should take (8-0.5)/0.5=15 seconds + # reading 1e6*8 bytes with 500Ki default bandwidth should take (8-0.5)/0.5=15 seconds assert_took(took, 15) @pytest.mark.parametrize( - "policy,mode,setting,value,should_took", + "policy,mode,setting,value,should_take", [ # # Local # pytest.param("default", None, None, None, 0, id="no_local_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", "user", @@ -325,7 +319,7 @@ def test_backup_throttling_override(): 7, id="user_local_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", "server", @@ -338,7 +332,7 @@ def test_backup_throttling_override(): # Remote # pytest.param("s3", None, None, None, 0, id="no_remote_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", "user", @@ -347,7 +341,7 @@ def test_backup_throttling_override(): 7, id="user_remote_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", "server", @@ -358,7 +352,7 @@ def test_backup_throttling_override(): ), ], ) -def test_read_throttling(policy, mode, setting, value, should_took): +def test_read_throttling(policy, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -368,17 +362,17 @@ def test_read_throttling(policy, mode, setting, value, should_took): """ ) _, took = elapsed(node.query, f"select * from data") - assert_took(took, should_took) + assert_took(took, should_take) @pytest.mark.parametrize( - "policy,mode,setting,value,should_took", + "policy,mode,setting,value,should_take", [ # # Local # pytest.param("default", None, None, None, 0, id="no_local_throttling"), - # reading 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "default", "user", @@ -387,7 +381,7 @@ def 
test_read_throttling(policy, mode, setting, value, should_took): 7, id="local_user_throttling", ), - # reading 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # reading 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "default", "server", @@ -400,7 +394,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): # Remote # pytest.param("s3", None, None, None, 0, id="no_remote_throttling"), - # writing 1e6*8 bytes with 1M default bandwith should take (8-1)/1=7 seconds + # writing 1e6*8 bytes with 1M default bandwidth should take (8-1)/1=7 seconds pytest.param( "s3", "user", @@ -409,7 +403,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): 7, id="user_remote_throttling", ), - # writing 1e6*8 bytes with 2M default bandwith should take (8-2)/2=3 seconds + # writing 1e6*8 bytes with 2M default bandwidth should take (8-2)/2=3 seconds pytest.param( "s3", "server", @@ -420,7 +414,7 @@ def test_read_throttling(policy, mode, setting, value, should_took): ), ], ) -def test_write_throttling(policy, mode, setting, value, should_took): +def test_write_throttling(policy, mode, setting, value, should_take): node_update_config(mode, setting, value) node.query( f""" @@ -429,7 +423,7 @@ def test_write_throttling(policy, mode, setting, value, should_took): """ ) _, took = elapsed(node.query, f"insert into data select * from numbers(1e6)") - assert_took(took, should_took) + assert_took(took, should_take) def test_max_mutations_bandwidth_for_server(): @@ -444,7 +438,7 @@ def test_max_mutations_bandwidth_for_server(): node.query, "alter table data update key = -key where 1 settings mutations_sync = 1", ) - # reading 1e6*8 bytes with 1M/s bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M/s bandwidth should take (8-1)/1=7 seconds assert_took(took, 7) @@ -457,5 +451,5 @@ def test_max_merges_bandwidth_for_server(): ) node.query("insert into data select * from numbers(1e6)") _, took = elapsed(node.query, "optimize table data final") - # reading 1e6*8 bytes with 1M/s bandwith should take (8-1)/1=7 seconds + # reading 1e6*8 bytes with 1M/s bandwidth should take (8-1)/1=7 seconds assert_took(took, 7) diff --git a/tests/integration/test_version_update/configs/log_conf.xml b/tests/integration/test_version_update/configs/log_conf.xml index f9d15e572aa..27c7107ce5e 100644 --- a/tests/integration/test_version_update/configs/log_conf.xml +++ b/tests/integration/test_version_update/configs/log_conf.xml @@ -1,4 +1,4 @@ - + trace /var/log/clickhouse-server/log.log @@ -8,4 +8,4 @@ /var/log/clickhouse-server/stderr.log /var/log/clickhouse-server/stdout.log - + diff --git a/tests/performance/insert_select_squashing.xml b/tests/performance/insert_select_squashing.xml new file mode 100644 index 00000000000..4c2c88f3d22 --- /dev/null +++ b/tests/performance/insert_select_squashing.xml @@ -0,0 +1,23 @@ + + + 1000 + + + +CREATE TABLE squash_performance +( + s1 String, + s2 Nullable(String), + a1 Array(Array(String)), + a2 Array(Array(UInt32)), + m1 Map(String, Array(String)), + m2 Map(String, Array(UInt64)), + t Tuple(String, Array(String), Map(String, String)) +) +ENGINE = Null; + + + INSERT INTO squash_performance SELECT * FROM generateRandom(42) LIMIT 500000 + + DROP TABLE IF EXISTS squash_performance + diff --git a/tests/performance/insert_select_squashing_dynamic.xml b/tests/performance/insert_select_squashing_dynamic.xml new file mode 100644 index 00000000000..f7f600fd8bd --- /dev/null +++ 
b/tests/performance/insert_select_squashing_dynamic.xml @@ -0,0 +1,59 @@ + + + 1000 + 0 + 1 + + + +CREATE TABLE dynamic_squash_performance_1 +( + d Dynamic +) +ENGINE = Null; + + + +CREATE TABLE dynamic_squash_performance_2 +( + d Dynamic(max_types=6) +) +ENGINE = Null; + + + +CREATE TABLE src_dynamic_squash_performance_1 +( + d Dynamic +) +ENGINE = Memory; + + + +CREATE TABLE src_dynamic_squash_performance_2 +( + d Dynamic(max_types=6) +) +ENGINE = Memory; + + + + + + + + + + + + INSERT INTO dynamic_squash_performance_1 SELECT number::Dynamic FROM numbers(10000000) + INSERT INTO dynamic_squash_performance_1 SELECT range(number % 100)::Dynamic FROM numbers(2000000) + INSERT INTO dynamic_squash_performance_1 SELECT * FROM src_dynamic_squash_performance_1 + INSERT INTO dynamic_squash_performance_2 SELECT * FROM src_dynamic_squash_performance_2 + + DROP TABLE IF EXISTS dynamic_squash_performance_1 + DROP TABLE IF EXISTS dynamic_squash_performance_2 + DROP TABLE IF EXISTS src_dynamic_squash_performance_1 + DROP TABLE IF EXISTS src_dynamic_squash_performance_2 + + diff --git a/tests/performance/insert_select_squashing_variant.xml b/tests/performance/insert_select_squashing_variant.xml new file mode 100644 index 00000000000..5c59fc7b50f --- /dev/null +++ b/tests/performance/insert_select_squashing_variant.xml @@ -0,0 +1,34 @@ + + + 1000 + 0 + 1 + 1 + + + +CREATE TABLE variant_squash_performance +( + v Variant(Tuple(v1 Array(UInt64)), Tuple(v2 Array(UInt64)), Tuple(v3 Array(UInt64)), Tuple(v4 Array(UInt64)), Tuple(v5 Array(UInt64))) +) +ENGINE = Null; + + + +CREATE TABLE src_variant_squash_performance +( + v Variant(Tuple(v1 Array(UInt64)), Tuple(v2 Array(UInt64)), Tuple(v3 Array(UInt64)), Tuple(v4 Array(UInt64)), Tuple(v5 Array(UInt64))) +) +ENGINE = Memory; + + + + + + + INSERT INTO variant_squash_performance SELECT * FROM src_variant_squash_performance + + DROP TABLE IF EXISTS variant_squash_performance + DROP TABLE IF EXISTS src_variant_squash_performance + + diff --git a/tests/performance/optimize_functions_to_subcolumns.xml b/tests/performance/optimize_functions_to_subcolumns.xml new file mode 100644 index 00000000000..146af1605c4 --- /dev/null +++ b/tests/performance/optimize_functions_to_subcolumns.xml @@ -0,0 +1,26 @@ + + + 4 + + + + CREATE TABLE t_subcolumns (a Array(UInt64), s Nullable(String), m Map(String, UInt64)) ENGINE = MergeTree ORDER BY tuple() + + + + INSERT INTO t_subcolumns SELECT range(number % 20), toString(number), mapFromArrays(range(number % 20), range(number % 20)) FROM numbers_mt(50000000) + + + + OPTIMIZE TABLE t_subcolumns FINAL + + + SELECT count() FROM t_subcolumns WHERE NOT ignore(length(a)) + SELECT count() FROM t_subcolumns WHERE notEmpty(a) + SELECT count() FROM t_subcolumns WHERE NOT ignore(length(m)) + SELECT count() FROM t_subcolumns WHERE notEmpty(m) + SELECT count() FROM t_subcolumns WHERE isNotNull(s) + SELECT count(s) FROM t_subcolumns + + DROP TABLE t_subcolumns + diff --git a/tests/performance/storage_join_direct_join.xml b/tests/performance/storage_join_direct_join.xml index 867108ac2b7..0e67abb275e 100644 --- a/tests/performance/storage_join_direct_join.xml +++ b/tests/performance/storage_join_direct_join.xml @@ -16,4 +16,7 @@ SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null; SELECT keys.key, value1 FROM keys ANY LEFT JOIN dict AS d ON (keys.key = d.key) FORMAT Null SETTINGS allow_experimental_analyzer=1 + + DROP TABLE IF EXISTS keys + DROP TABLE IF EXISTS dict diff --git 
a/tests/queries/0_stateless/00408_http_keep_alive.reference b/tests/queries/0_stateless/00408_http_keep_alive.reference index 17a7fd690a8..5402036bfd7 100644 --- a/tests/queries/0_stateless/00408_http_keep_alive.reference +++ b/tests/queries/0_stateless/00408_http_keep_alive.reference @@ -1,6 +1,6 @@ < Connection: Keep-Alive -< Keep-Alive: timeout=10 +< Keep-Alive: timeout=10, max=? < Connection: Keep-Alive -< Keep-Alive: timeout=10 +< Keep-Alive: timeout=10, max=? < Connection: Keep-Alive -< Keep-Alive: timeout=10 +< Keep-Alive: timeout=10, max=? diff --git a/tests/queries/0_stateless/00408_http_keep_alive.sh b/tests/queries/0_stateless/00408_http_keep_alive.sh index 4bd0e494eb8..4a1cb4ed712 100755 --- a/tests/queries/0_stateless/00408_http_keep_alive.sh +++ b/tests/queries/0_stateless/00408_http_keep_alive.sh @@ -6,9 +6,10 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) URL="${CLICKHOUSE_PORT_HTTP_PROTO}://${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}/" -${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< "SELECT 1" 2>&1 | perl -lnE 'print if /Keep-Alive/'; -${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< " error here " 2>&1 | perl -lnE 'print if /Keep-Alive/'; -${CLICKHOUSE_CURL} -vsS "${URL}"ping 2>&1 | perl -lnE 'print if /Keep-Alive/'; +# the sed command here replaces the real number of left requests with a question mark, because it can vary and we don't really have control over it +${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< "SELECT 1" 2>&1 | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -i 'keep-alive'; +${CLICKHOUSE_CURL} -vsS "${URL}" --data-binary @- <<< " error here " 2>&1 | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -i 'keep-alive'; +${CLICKHOUSE_CURL} -vsS "${URL}"ping 2>&1 | perl -lnE 'print if /Keep-Alive/' | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -i 'keep-alive'; # no keep-alive: ${CLICKHOUSE_CURL} -vsS "${URL}"404/not/found/ 2>&1 | perl -lnE 'print if /Keep-Alive/'; diff --git a/tests/queries/0_stateless/00501_http_head.reference b/tests/queries/0_stateless/00501_http_head.reference index 8351327b356..807bcd4922e 100644 --- a/tests/queries/0_stateless/00501_http_head.reference +++ b/tests/queries/0_stateless/00501_http_head.reference @@ -2,11 +2,11 @@ HTTP/1.1 200 OK Connection: Keep-Alive Content-Type: text/tab-separated-values; charset=UTF-8 Transfer-Encoding: chunked -Keep-Alive: timeout=10 +Keep-Alive: timeout=10, max=? HTTP/1.1 200 OK Connection: Keep-Alive Content-Type: text/tab-separated-values; charset=UTF-8 Transfer-Encoding: chunked -Keep-Alive: timeout=10 +Keep-Alive: timeout=10, max=? diff --git a/tests/queries/0_stateless/00501_http_head.sh b/tests/queries/0_stateless/00501_http_head.sh index 60283f26833..30da64c31f0 100755 --- a/tests/queries/0_stateless/00501_http_head.sh +++ b/tests/queries/0_stateless/00501_http_head.sh @@ -4,8 +4,9 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -( ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=SELECT%201"; - ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=select+*+from+system.numbers+limit+1000000" ) | grep -v "Date:" | grep -v "X-ClickHouse-Server-Display-Name:" | grep -v "X-ClickHouse-Query-Id:" | grep -v "X-ClickHouse-Format:" | grep -v "X-ClickHouse-Timezone:" +# the sed command here replaces the real number of left requests with a question mark, because it can vary and we don't really have control over it +( ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=SELECT%201" | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I'; + ${CLICKHOUSE_CURL} -s --head "${CLICKHOUSE_URL}&query=select+*+from+system.numbers+limit+1000000" ) | sed -r 's/(keep-alive: timeout=10, max=)[0-9]+/\1?/I' | grep -v "Date:" | grep -v "X-ClickHouse-Server-Display-Name:" | grep -v "X-ClickHouse-Query-Id:" | grep -v "X-ClickHouse-Format:" | grep -v "X-ClickHouse-Timezone:" if [[ $(${CLICKHOUSE_CURL} -sS -X POST -I "${CLICKHOUSE_URL}&query=SELECT+1" | grep -c '411 Length Required') -ne 1 ]]; then echo FAIL diff --git a/tests/queries/0_stateless/00652_mergetree_mutations.sh b/tests/queries/0_stateless/00652_mergetree_mutations.sh index a9d7908a1af..edb306d3883 100755 --- a/tests/queries/0_stateless/00652_mergetree_mutations.sh +++ b/tests/queries/0_stateless/00652_mergetree_mutations.sh @@ -70,6 +70,21 @@ sleep 1 ${CLICKHOUSE_CLIENT} --query="INSERT INTO mutations_cleaner(x) VALUES (4)" sleep 0.1 +for i in {1..10} +do + + if [ "$(${CLICKHOUSE_CLIENT} --query="SELECT count() FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner'")" -eq 2 ]; then + break + fi + + if [[ $i -eq 100 ]]; then + echo "Timed out while waiting for outdated mutation record to be deleted!" + fi + + sleep 1 + ${CLICKHOUSE_CLIENT} --query="INSERT INTO mutations_cleaner(x) VALUES (4)" +done + # Check that the first mutation is cleaned ${CLICKHOUSE_CLIENT} --query="SELECT mutation_id, command, is_done FROM system.mutations WHERE database = '$CLICKHOUSE_DATABASE' and table = 'mutations_cleaner' ORDER BY mutation_id" diff --git a/tests/queries/0_stateless/00900_long_parquet_load.sh b/tests/queries/0_stateless/00900_long_parquet_load.sh index 1bafb033f56..3a7022ac0cf 100755 --- a/tests/queries/0_stateless/00900_long_parquet_load.sh +++ b/tests/queries/0_stateless/00900_long_parquet_load.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Tags: long, no-fasttest, no-debug +# Tags: long, no-fasttest, no-debug, no-asan, no-msan, no-tsan # # Load all possible .parquet files found in submodules. 
diff --git a/tests/queries/0_stateless/01049_window_view_window_functions.reference b/tests/queries/0_stateless/01049_window_view_window_functions.reference index 2d49664b280..073301104d2 100644 --- a/tests/queries/0_stateless/01049_window_view_window_functions.reference +++ b/tests/queries/0_stateless/01049_window_view_window_functions.reference @@ -67,3 +67,7 @@ SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 2020-01-10 00:00:00 SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa')); 2019-01-10 00:00:00 +SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleEnd(tuple()); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/01049_window_view_window_functions.sql b/tests/queries/0_stateless/01049_window_view_window_functions.sql index 617019bd2c6..fb2b4b4949a 100644 --- a/tests/queries/0_stateless/01049_window_view_window_functions.sql +++ b/tests/queries/0_stateless/01049_window_view_window_functions.sql @@ -36,3 +36,8 @@ SELECT hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, I SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa')); + +SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleEnd(tuple()); -- { serverError ILLEGAL_COLUMN } diff --git a/tests/queries/0_stateless/01172_transaction_counters.reference b/tests/queries/0_stateless/01172_transaction_counters.reference index 24083d7d40b..0fd73c7bcec 100644 --- a/tests/queries/0_stateless/01172_transaction_counters.reference +++ b/tests/queries/0_stateless/01172_transaction_counters.reference @@ -16,25 +16,25 @@ 7 all_3_3_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 7 all_4_4_0 (0,0,'00000000-0000-0000-0000-000000000000') 0 8 1 -1 1 AddPart 1 1 1 1 all_1_1_0 -2 1 Begin 1 1 1 1 -2 1 AddPart 1 1 1 1 all_2_2_0 -2 1 Rollback 1 1 1 1 -3 1 Begin 1 1 1 1 -3 1 AddPart 1 1 1 1 all_3_3_0 -3 1 Commit 1 1 1 0 -1 1 LockPart 1 1 1 1 all_2_2_0 -4 1 Begin 1 1 1 1 -4 1 AddPart 1 1 1 1 all_4_4_0 -4 1 Commit 1 1 1 0 -5 1 Begin 1 1 1 1 -5 1 AddPart 1 1 1 1 all_5_5_0 -5 1 LockPart 1 1 1 1 all_1_1_0 -5 1 LockPart 1 1 1 1 all_3_3_0 -5 1 LockPart 1 1 1 1 all_4_4_0 -5 1 LockPart 1 1 1 1 all_5_5_0 -5 1 UnlockPart 1 1 1 1 all_1_1_0 -5 1 UnlockPart 1 1 1 1 all_3_3_0 -5 1 UnlockPart 1 1 1 1 all_4_4_0 -5 1 UnlockPart 1 1 1 1 all_5_5_0 -5 1 Rollback 1 1 1 1 +1 AddPart 1 1 1 1 all_1_1_0 +2 Begin 1 1 1 1 +2 AddPart 1 1 1 1 all_2_2_0 +2 Rollback 1 1 1 1 +3 Begin 1 1 1 1 +3 AddPart 1 1 1 1 all_3_3_0 +3 Commit 1 1 1 0 +1 LockPart 1 1 1 1 all_2_2_0 +4 Begin 1 1 1 1 +4 AddPart 1 1 1 1 all_4_4_0 +4 Commit 1 1 1 0 +5 Begin 1 1 1 1 +5 AddPart 1 1 1 1 all_5_5_0 +5 LockPart 1 1 1 1 all_1_1_0 +5 LockPart 1 1 1 1 all_3_3_0 +5 LockPart 1 1 1 1 all_4_4_0 +5 LockPart 1 1 1 1 all_5_5_0 +5 UnlockPart 1 1 1 1 all_1_1_0 +5 UnlockPart 1 1 1 1 all_3_3_0 +5 UnlockPart 1 1 1 1 all_4_4_0 +5 UnlockPart 1 1 1 1 all_5_5_0 +5 Rollback 1 1 1 1 diff --git 
a/tests/queries/0_stateless/01172_transaction_counters.sql b/tests/queries/0_stateless/01172_transaction_counters.sql index a809e4196e9..581b45cd15c 100644 --- a/tests/queries/0_stateless/01172_transaction_counters.sql +++ b/tests/queries/0_stateless/01172_transaction_counters.sql @@ -42,7 +42,6 @@ rollback; system flush logs; select indexOf((select arraySort(groupUniqArray(tid)) from system.transactions_info_log where database=currentDatabase() and table='txn_counters'), tid), - (toDecimal64(now64(6), 6) - toDecimal64(event_time, 6)) < 100, type, thread_id!=0, length(query_id)=length(queryID()) or type='Commit' and query_id='', -- ignore fault injection after commit diff --git a/tests/queries/0_stateless/01246_buffer_flush.sh b/tests/queries/0_stateless/01246_buffer_flush.sh index aea91a0bf6b..3c7b9038e1f 100755 --- a/tests/queries/0_stateless/01246_buffer_flush.sh +++ b/tests/queries/0_stateless/01246_buffer_flush.sh @@ -27,7 +27,7 @@ function wait_until() function get_buffer_delay() { local buffer_insert_id=$1 && shift - query "SYSTEM FLUSH LOGS" + $CLICKHOUSE_CLIENT -q "SYSTEM FLUSH LOGS" query " WITH (SELECT event_time_microseconds FROM system.query_log WHERE current_database = '$CLICKHOUSE_DATABASE' AND type = 'QueryStart' AND query_id = '$buffer_insert_id') AS begin_, diff --git a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh index 47b5a4dea13..a07dd306b3e 100755 --- a/tests/queries/0_stateless/01600_parts_states_metrics_long.sh +++ b/tests/queries/0_stateless/01600_parts_states_metrics_long.sh @@ -1,10 +1,16 @@ #!/usr/bin/env bash -# Tags: long +# Tags: long, no-parallel CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh +function query() +{ + # NOTE: database_atomic_wait_for_drop_and_detach_synchronously needed only for local env, CI has it ON + ${CLICKHOUSE_CURL} -sS "${CLICKHOUSE_URL}&database_atomic_wait_for_drop_and_detach_synchronously=1" -d "$*" +} + # NOTE: database = $CLICKHOUSE_DATABASE is unwanted verify_sql="SELECT (SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics) @@ -18,13 +24,13 @@ verify() { for i in {1..5000} do - result=$( $CLICKHOUSE_CLIENT --query="$verify_sql" ) + result=$( query "$verify_sql" ) [ "$result" = "1" ] && echo "$result" && break sleep 0.1 if [[ $i -eq 5000 ]] then - $CLICKHOUSE_CLIENT " + query " SELECT sumIf(value, metric = 'PartsActive'), sumIf(value, metric = 'PartsOutdated') FROM system.metrics; SELECT sum(active), sum(NOT active) FROM system.parts; SELECT sum(active), sum(NOT active) FROM system.projection_parts; @@ -34,17 +40,17 @@ verify() done } -$CLICKHOUSE_CLIENT --database_atomic_wait_for_drop_and_detach_synchronously=1 --query="DROP TABLE IF EXISTS test_table" -$CLICKHOUSE_CLIENT --query="CREATE TABLE test_table (data Date) ENGINE = MergeTree PARTITION BY toYear(data) ORDER BY data;" +query "DROP TABLE IF EXISTS test_table" +query "CREATE TABLE test_table (data Date) ENGINE = MergeTree PARTITION BY toYear(data) ORDER BY data;" -$CLICKHOUSE_CLIENT --query="INSERT INTO test_table VALUES ('1992-01-01')" +query "INSERT INTO test_table VALUES ('1992-01-01')" verify -$CLICKHOUSE_CLIENT --query="INSERT INTO test_table VALUES ('1992-01-02')" +query "INSERT INTO test_table VALUES ('1992-01-02')" verify -$CLICKHOUSE_CLIENT --query="OPTIMIZE TABLE test_table FINAL" +query "OPTIMIZE TABLE test_table FINAL" verify -$CLICKHOUSE_CLIENT 
--database_atomic_wait_for_drop_and_detach_synchronously=1 --query="DROP TABLE test_table" +query "DROP TABLE test_table" verify diff --git a/tests/queries/0_stateless/01656_test_query_log_factories_info.reference b/tests/queries/0_stateless/01656_test_query_log_factories_info.reference index 47b3133ceca..44531c19ab7 100644 --- a/tests/queries/0_stateless/01656_test_query_log_factories_info.reference +++ b/tests/queries/0_stateless/01656_test_query_log_factories_info.reference @@ -17,7 +17,7 @@ used_functions ['repeat'] arraySort(used_data_type_families) -['Array','Int32','Nullable','String'] +['Int32','Nullable','String'] used_database_engines ['Atomic'] diff --git a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference index 25aa9dc5dec..37993873983 100644 --- a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference +++ b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.reference @@ -1,2 +1,3 @@ 3 950 990 500 2000 +[950] [999] diff --git a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql index 5375823aa8e..956bf3711a2 100644 --- a/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql +++ b/tests/queries/0_stateless/01710_aggregate_projection_with_normalized_states.sql @@ -29,4 +29,6 @@ FROM cluster('test_cluster_two_shards', currentDatabase(), r) WHERE a = 'x' settings prefer_localhost_replica=0; +SELECT quantilesTimingMerge(0.95)(q), quantilesTimingMerge(toInt64(1))(q) FROM remote('127.0.0.{1,2}', currentDatabase(), r); + DROP TABLE r; diff --git a/tests/queries/0_stateless/01710_projection_vertical_merges.sql b/tests/queries/0_stateless/01710_projection_vertical_merges.sql index 0f80d659e92..0d745e44b10 100644 --- a/tests/queries/0_stateless/01710_projection_vertical_merges.sql +++ b/tests/queries/0_stateless/01710_projection_vertical_merges.sql @@ -1,4 +1,5 @@ --- Tags: long +-- Tags: long, no-parallel +-- set no-parallel tag is to prevent timeout of this test drop table if exists t; diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference index b70a1cb7c75..2c66db91737 100644 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.reference @@ -1,3 +1,9 @@ 8 +8 +1 4 4 +1 +4 +4 +1 diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh new file mode 100755 index 00000000000..1482730af2c --- /dev/null +++ b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Tags: no-random-merge-tree-settings, no-random-settings + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. 
"$CURDIR"/../shell_config.sh + +sql="toUInt16OrNull(arrayFirst((v, k) -> (k = '4Id'), arr[2], arr[1]))" + +# Create the table and fill it +$CLICKHOUSE_CLIENT -n --query=" + CREATE TABLE test_extract(str String, arr Array(Array(String)) ALIAS extractAllGroupsHorizontal(str, '\\W(\\w+)=(\"[^\"]*?\"|[^\",}]*)')) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY tuple(); + INSERT INTO test_extract (str) WITH range(8) as range_arr, arrayMap(x-> concat(toString(x),'Id'), range_arr) as key, arrayMap(x -> rand() % 8, range_arr) as val, arrayStringConcat(arrayMap((x,y) -> concat(x,'=',toString(y)), key, val),',') as str SELECT str FROM numbers(500000); + ALTER TABLE test_extract ADD COLUMN 15Id Nullable(UInt16) DEFAULT $sql;" + +function test() +{ + # Execute two queries and compare if they have similar memory usage: + # The first query uses the default column value, while the second explicitly uses the same SQL as the default value. + # Follow https://github.com/ClickHouse/ClickHouse/issues/17317 for more info about the issue + where=$1 + + uuid_1=$(cat /proc/sys/kernel/random/uuid) + $CLICKHOUSE_CLIENT --query="SELECT uniq(15Id) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_1 + uuid_2=$(cat /proc/sys/kernel/random/uuid) + $CLICKHOUSE_CLIENT --query="SELECT uniq($sql) FROM test_extract $where SETTINGS max_threads=1" --query_id=$uuid_2 + $CLICKHOUSE_CLIENT -n --query=" + SYSTEM FLUSH LOGS; + WITH memory_1 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_1' AND type = 'QueryFinish' as memory_1), + memory_2 AS (SELECT memory_usage FROM system.query_log WHERE current_database = currentDatabase() AND query_id='$uuid_2' AND type = 'QueryFinish' as memory_2) + SELECT memory_1.memory_usage <= 1.2 * memory_2.memory_usage OR + memory_2.memory_usage <= 1.2 * memory_1.memory_usage FROM memory_1, memory_2;" +} + +test "" +test "PREWHERE 15Id < 4" +test "WHERE 15Id < 4" diff --git a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql b/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql deleted file mode 100644 index 2eec08635eb..00000000000 --- a/tests/queries/0_stateless/01903_correct_block_size_prediction_with_default.sql +++ /dev/null @@ -1,13 +0,0 @@ --- Tags: no-random-merge-tree-settings - -CREATE TABLE test_extract(str String, arr Array(Array(String)) ALIAS extractAllGroupsHorizontal(str, '\\W(\\w+)=("[^"]*?"|[^",}]*)')) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY tuple(); - -INSERT INTO test_extract (str) WITH range(8) as range_arr, arrayMap(x-> concat(toString(x),'Id'), range_arr) as key, arrayMap(x -> rand() % 8, range_arr) as val, arrayStringConcat(arrayMap((x,y) -> concat(x,'=',toString(y)), key, val),',') as str SELECT str FROM numbers(500000); - -ALTER TABLE test_extract ADD COLUMN `15Id` Nullable(UInt16) DEFAULT toUInt16OrNull(arrayFirst((v, k) -> (k = '4Id'), arr[2], arr[1])); - -SELECT uniq(15Id) FROM test_extract SETTINGS max_threads=1, max_memory_usage=100000000; - -SELECT uniq(15Id) FROM test_extract PREWHERE 15Id < 4 SETTINGS max_threads=1, max_memory_usage=100000000; - -SELECT uniq(15Id) FROM test_extract WHERE 15Id < 4 SETTINGS max_threads=1, max_memory_usage=100000000; diff --git a/tests/queries/0_stateless/02122_join_group_by_timeout.reference b/tests/queries/0_stateless/02122_join_group_by_timeout.reference index f314e22e519..6500560e8fc 100644 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.reference +++ 
b/tests/queries/0_stateless/02122_join_group_by_timeout.reference @@ -1,4 +1,6 @@ -Code: 159 -0 +Code: 159 +query_duration 1 +0 +query_duration 1 Code: 159 0 diff --git a/tests/queries/0_stateless/02122_join_group_by_timeout.sh b/tests/queries/0_stateless/02122_join_group_by_timeout.sh index 8380c5dbd0c..b4644878544 100755 --- a/tests/queries/0_stateless/02122_join_group_by_timeout.sh +++ b/tests/queries/0_stateless/02122_join_group_by_timeout.sh @@ -1,27 +1,23 @@ #!/usr/bin/env bash -# Tags: no-debug - -# no-debug: Query is canceled by timeout after max_execution_time, -# but sending an exception to the client may hang -# for more than MAX_PROCESS_WAIT seconds in a slow debug build, -# and test will fail. CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -MAX_PROCESS_WAIT=5 - -IS_SANITIZER=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.warnings WHERE message like '%built with sanitizer%'") -if [ "$IS_SANITIZER" -gt 0 ]; then - # Query may hang for more than 5 seconds, especially in tsan build - MAX_PROCESS_WAIT=15 +TIMEOUT=5 +IS_SANITIZER_OR_DEBUG=$($CLICKHOUSE_CLIENT -q "SELECT count() FROM system.warnings WHERE message like '%built with sanitizer%' or message like '%built in debug mode%'") +if [ "$IS_SANITIZER_OR_DEBUG" -gt 0 ]; then + # Increase the timeout due to in debug/sanitizers build: + # - client is slow + # - stacktrace resolving is slow + TIMEOUT=15 fi # TCP CLIENT: As of today (02/12/21) uses PullingAsyncPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \ - "SELECT * FROM +query_id=$(random_str 12) +$CLICKHOUSE_CLIENT --query_id "$query_id" --max_execution_time 1 -q " + SELECT * FROM ( SELECT a.name as n FROM @@ -34,28 +30,35 @@ timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT --max_execution_time 1 -q \ GROUP BY n ) LIMIT 20 - FORMAT Null" 2>&1 | grep -o "Code: 159" | sort | uniq + FORMAT Null +" 2>&1 | grep -m1 -o "Code: 159" +$CLICKHOUSE_CLIENT -q "system flush logs" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" + ### Should stop pulling data and return what has been generated already (return code 0) -timeout -s KILL $MAX_PROCESS_WAIT $CLICKHOUSE_CLIENT -q \ - "SELECT a.name as n - FROM - ( - SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 - ) AS a, - ( - SELECT 'Name' as name2, number FROM system.numbers LIMIT 2000000 - ) as b - FORMAT Null - SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' - " +query_id=$(random_str 12) +$CLICKHOUSE_CLIENT --query_id "$query_id" -q " + SELECT a.name as n + FROM + ( + SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 + ) AS a, + ( + SELECT 'Name' as name2, number FROM system.numbers LIMIT 2000000 + ) as b + FORMAT Null + SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' +" echo $? 
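# Aside: a minimal standalone illustration (not taken from this test) of the two overflow modes exercised here.
# With the default 'throw' the query fails with Code: 159 (TIMEOUT_EXCEEDED); with 'break' it stops early and
# returns whatever was produced, so the exit code is 0 and count() only reflects the rows read before the cutoff.
$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.numbers SETTINGS max_execution_time = 1, timeout_overflow_mode = 'throw'" # expected: Code: 159
$CLICKHOUSE_CLIENT -q "SELECT count() FROM system.numbers SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break'" # expected: a partial count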
+$CLICKHOUSE_CLIENT -q "system flush logs" +${CLICKHOUSE_CURL} -q -sS "$CLICKHOUSE_URL" -d "select 'query_duration', round(query_duration_ms/1000) from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and query_id = '$query_id' and type != 'QueryStart'" # HTTP CLIENT: As of today (02/12/21) uses PullingPipelineExecutor ### Should be cancelled after 1 second and return a 159 exception (timeout) -${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d \ - "SELECT * FROM +${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL&max_execution_time=1" -d " + SELECT * FROM ( SELECT a.name as n FROM @@ -68,12 +71,13 @@ ${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL&max_exec GROUP BY n ) LIMIT 20 - FORMAT Null" 2>&1 | grep -o "Code: 159" | sort | uniq + FORMAT Null +" 2>&1 | grep -o "Code: 159" | sort | uniq ### Should stop pulling data and return what has been generated already (return code 0) -${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL" -d \ - "SELECT a.name as n +${CLICKHOUSE_CURL} -q --max-time $TIMEOUT -sS "$CLICKHOUSE_URL" -d " + SELECT a.name as n FROM ( SELECT 'Name' as name, number FROM system.numbers LIMIT 2000000 @@ -83,5 +87,5 @@ ${CLICKHOUSE_CURL} -q --max-time $MAX_PROCESS_WAIT -sS "$CLICKHOUSE_URL" -d \ ) as b FORMAT Null SETTINGS max_execution_time = 1, timeout_overflow_mode = 'break' - " +" echo $? diff --git a/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql b/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql index f82f79dbe44..6491253cd5f 100644 --- a/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql +++ b/tests/queries/0_stateless/02319_lightweight_delete_on_merge_tree.sql @@ -102,7 +102,7 @@ ALTER TABLE t_proj ADD PROJECTION p_1 (SELECT avg(a), avg(b), count()) SETTINGS INSERT INTO t_proj SELECT number + 1, number + 1 FROM numbers(1000); -DELETE FROM t_proj WHERE a < 100; -- { serverError NOT_IMPLEMENTED } +DELETE FROM t_proj WHERE a < 100; -- { serverError SUPPORT_IS_DISABLED } SELECT avg(a), avg(b), count() FROM t_proj; diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.reference b/tests/queries/0_stateless/02354_vector_search_bugs.reference index d2c2d7e2fb7..a27b086e118 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.reference +++ b/tests/queries/0_stateless/02354_vector_search_bugs.reference @@ -1,17 +1,5 @@ Issue #52258: Empty Arrays or Arrays with default values are rejected -- Annoy -- Usearch It is possible to create parts with different Array vector sizes but there will be an error at query time -- Annoy -- Usearch Correctness of index with > 1 mark -- Annoy 1 [1,0] 0 9000 [9000,0] 0 -1 (1,0) 0 -9000 (9000,0) 0 -- Usearch -1 [1,0] 0 -9000 [9000,0] 0 -1 (1,0) 0 -9000 (9000,0) 0 diff --git a/tests/queries/0_stateless/02354_vector_search_bugs.sql b/tests/queries/0_stateless/02354_vector_search_bugs.sql index f03c36f6550..7c66b4b8e45 100644 --- a/tests/queries/0_stateless/02354_vector_search_bugs.sql +++ b/tests/queries/0_stateless/02354_vector_search_bugs.sql @@ -1,61 +1,22 @@ --- Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-ordinary-database, no-asan +-- Tags: no-fasttest, no-ordinary-database --- Tests vector search in ClickHouse, i.e. Annoy and Usearch indexes. Both index types share similarities in implementation and usage, --- therefore they are tested in a single file. 
- --- This file contains tests for various bugs and special cases - -SET allow_experimental_annoy_index = 1; -SET allow_experimental_usearch_index = 1; +-- Tests various bugs and special cases for vector indexes. +SET allow_experimental_vector_similarity_index = 1; SET enable_analyzer = 1; -- 0 vs. 1 produce slightly different error codes, make it future-proof DROP TABLE IF EXISTS tab; SELECT 'Issue #52258: Empty Arrays or Arrays with default values are rejected'; -SELECT '- Annoy'; - -CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree() ORDER BY (id); +CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree() ORDER BY id; INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } DROP TABLE tab; -CREATE TABLE tab (id UInt64, vec Tuple(Float32, Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree() ORDER BY (id); -INSERT INTO tab (id) VALUES (1); -- works fine, takes on default tuple (0.0, 0.0) -DROP TABLE tab; - -SELECT '- Usearch'; - -CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree() ORDER BY (id); -INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA } -INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA } -DROP TABLE tab; - -CREATE TABLE tab (id UInt64, vec Tuple(Float32, Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree() ORDER BY (id); -INSERT INTO tab (id) VALUES (1); -- works fine, takes on default tuple (0.0, 0.0) -DROP TABLE tab; - SELECT 'It is possible to create parts with different Array vector sizes but there will be an error at query time'; -SELECT '- Annoy'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -SYSTEM STOP MERGES tab; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); -INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); - -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } - -DROP TABLE tab; - -SELECT '- Usearch'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; SYSTEM STOP MERGES tab; INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2]); INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); @@ -70,9 +31,7 @@ DROP TABLE tab; SELECT 'Correctness of index with > 1 mark'; -SELECT '- Annoy'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes=0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity=8192; -- disable adaptive granularity due to bug +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity = 8192; -- disable adaptive granularity due to bug INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); WITH [1.0, 0.0] AS reference_vec @@ -88,58 +47,3 @@ ORDER BY L2Distance(vec, reference_vec) LIMIT 1; DROP TABLE tab; - --- same, but with 
Tuples -CREATE TABLE tab(id Int32, vec Tuple(Float32, Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes=0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity=8192; -- disable adaptive granularity due to bug -INSERT INTO tab SELECT number, (toFloat32(number), 0.0) from numbers(10000); - -WITH (1.0, 0.0) AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -WITH (9000.0, 0.0) AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -DROP TABLE tab; - -SELECT '- Usearch'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes=0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity=8192; -- disable adaptive granularity due to bug -INSERT INTO tab SELECT number, [toFloat32(number), 0.0] from numbers(10000); - -WITH [1.0, 0.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -WITH [9000.0, 0.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -DROP TABLE tab; - --- same, but with Tuples -CREATE TABLE tab(id Int32, vec Tuple(Float32, Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes=0, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, index_granularity=8192; -- disable adaptive granularity due to bug -INSERT INTO tab SELECT number, (toFloat32(number), 0.0) from numbers(10000); - -WITH (1.0, 0.0) AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -WITH (9000.0, 0.0) AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab -ORDER BY L2Distance(vec, reference_vec) -LIMIT 1; - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_default_granularity.reference b/tests/queries/0_stateless/02354_vector_search_default_granularity.reference index 2f97ed72c52..ab3cc71ff6b 100644 --- a/tests/queries/0_stateless/02354_vector_search_default_granularity.reference +++ b/tests/queries/0_stateless/02354_vector_search_default_granularity.reference @@ -1,7 +1,2 @@ -Test the default index granularity for vector search indexes (CREATE TABLE AND ALTER TABLE), should be 100 million for Annoy and USearch -- Annoy -100000000 -100000000 -- Usearch 100000000 100000000 diff --git a/tests/queries/0_stateless/02354_vector_search_default_granularity.sql b/tests/queries/0_stateless/02354_vector_search_default_granularity.sql index f15554505f8..acb69cb6ff8 100644 --- a/tests/queries/0_stateless/02354_vector_search_default_granularity.sql +++ b/tests/queries/0_stateless/02354_vector_search_default_granularity.sql @@ -1,35 +1,18 @@ --- Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-ordinary-database, no-asan +-- Tags: no-fasttest, no-ordinary-database --- Tests vector search in ClickHouse, i.e. Annoy and Usearch indexes. Both index types share similarities in implementation and usage, --- therefore they are tested in a single file. +-- Tests that vector search indexes use a (non-standard) index granularity of 100 mio by default. --- This file contains tests for the non-standard default granularity of vector search indexes. 
- -SET allow_experimental_annoy_index = 1; -SET allow_experimental_usearch_index = 1; - -SELECT 'Test the default index granularity for vector search indexes (CREATE TABLE AND ALTER TABLE), should be 100 million for Annoy and USearch'; - -SELECT '- Annoy'; +SET allow_experimental_vector_similarity_index = 1; +-- After CREATE TABLE DROP TABLE IF EXISTS tab; -CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE annoy) ENGINE=MergeTree ORDER BY id; -SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; - -DROP TABLE tab; -CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE=MergeTree ORDER BY id; -ALTER TABLE tab ADD INDEX idx(vec) TYPE annoy; -SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; - -SELECT '- Usearch'; - -DROP TABLE tab; -CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE usearch) ENGINE=MergeTree ORDER BY id; -SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; - -DROP TABLE tab; -CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE=MergeTree ORDER BY id; -ALTER TABLE tab ADD INDEX idx(vec) TYPE usearch; +CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; +SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; + +-- After ALTER TABLE +DROP TABLE tab; +CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id; +ALTER TABLE tab ADD INDEX idx(vec) TYPE vector_similarity('hnsw', 'L2Distance'); SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_detach_attach.reference b/tests/queries/0_stateless/02354_vector_search_detach_attach.reference new file mode 100644 index 00000000000..80eb091922e --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_detach_attach.reference @@ -0,0 +1,3 @@ +5 [0,2] 0 +6 [0,2.1] 0.09999990463256836 +7 [0,2.2] 0.20000004768371582 diff --git a/tests/queries/0_stateless/02354_vector_search_detach_attach.sql b/tests/queries/0_stateless/02354_vector_search_detach_attach.sql new file mode 100644 index 00000000000..f92eaddbbed --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_detach_attach.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that vector similarity indexes can be detached/attached. 
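-- Aside, a sketch only (not part of this test): one way to confirm that the index definition survives
-- the DETACH/ATTACH round-trip below is to look it up in system.data_skipping_indices afterwards;
-- 'tab' and 'idx' are the table and index names used by the test that follows.
SELECT name, type, granularity
FROM system.data_skipping_indices
WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx';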
+ +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); + +DETACH TABLE tab SYNC; +ATTACH TABLE tab; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference index 43bc49e8adc..f18daa6e02e 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.reference @@ -1,8 +1,8 @@ -At most two index arguments -1st argument (distance function) must be String -Rejects unsupported distance functions -2nd argument (Annoy: number of trees, USearch: scalar kind) must be UInt64 (Annoy) / String (Usearch) -Rejects unsupported scalar kinds (only Usearch) +Two or six index arguments +1st argument (method) must be String and hnsw +2nd argument (distance function) must be String and L2Distance or cosineDistance +3nd argument (quantization), if given, must be String and f32, f16, ... +4nd argument (M), if given, must be UInt64 and > 1 Must be created on single column -Must be created on Array(Float32) or Tuple(Float32, Float, ...) columns +Must be created on Array(Float32) columns Rejects INSERTs of Arrays with different sizes diff --git a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql index 6a4d6448629..de9d37e1000 100644 --- a/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql +++ b/tests/queries/0_stateless/02354_vector_search_index_creation_negative.sql @@ -1,63 +1,45 @@ --- Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-ordinary-database, no-asan +-- Tags: no-fasttest, no-ordinary-database --- Tests vector search in ClickHouse, i.e. Annoy and Usearch indexes. Both index types share similarities in implementation and usage, --- therefore they are tested in a single file. +-- Tests that various conditions are checked during creation of vector search indexes. --- This file tests that various conditions are checked during creation of vector search indexes. 
- -SET allow_experimental_annoy_index = 1; -SET allow_experimental_usearch_index = 1; +SET allow_experimental_vector_similarity_index = 1; DROP TABLE IF EXISTS tab; -SELECT 'At most two index arguments'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy('too', 'many', 'arguments')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch('too', 'many', 'args')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +SELECT 'Two or six index arguments'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant_have_one_arg')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'three_args')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'more', 'than', 'six', 'args', '!')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -SELECT '1st argument (distance function) must be String'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +SELECT '1st argument (method) must be String and hnsw'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity(3, 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('not_hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT 'Rejects unsupported distance functions'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy('invalidDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch('invalidDistance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '2nd argument (distance function) must be String and L2Distance or cosineDistance'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'invalid_distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } -SELECT '2nd argument (Annoy: number of trees, USearch: scalar kind) must be UInt64 (Annoy) / String (Usearch)'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy('L2Distance', 'not an UInt64')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch(3)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } - -SELECT 'Rejects unsupported scalar kinds (only Usearch)'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX 
idx vec TYPE usearch('L2Distance', 'invalidKind')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '3nd argument (quantization), if given, must be String and f32, f16, ...'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1, 1, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'invalid', 2, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } +SELECT '4nd argument (M), if given, must be UInt64 and > 1'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 'invalid', 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_QUERY } +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'f32', 1, 1, 1)) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_DATA } SELECT 'Must be created on single column'; -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } - -SELECT 'Must be created on Array(Float32) or Tuple(Float32, Float, ...) columns'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } +SELECT 'Must be created on Array(Float32) columns'; SET allow_suspicious_low_cardinality_types = 1; - -CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } - -CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } - -CREATE TABLE tab(id Int32, vec Tuple(Float64), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Tuple(Float64), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } - -CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } - -CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } -CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec UInt64, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Float32, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError 
ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE tab(id Int32, vec Nullable(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } SELECT 'Rejects INSERTs of Arrays with different sizes'; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; -INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } -DROP TABLE tab; - -CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id; INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA } DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference new file mode 100644 index 00000000000..030bfa9b1bd --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.reference @@ -0,0 +1,2 @@ +Annoy +Usearch diff --git a/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql new file mode 100644 index 00000000000..0889aa74f7a --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_legacy_index_compatibility.sql @@ -0,0 +1,43 @@ +-- Indexes of type 'annoy' or 'usearch' are no longer supported. +-- Test what happens when ClickHouse encounters tables with the old index type. + +DROP TABLE IF EXISTS tab; + +SELECT 'Annoy'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX vec_idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id; + +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -- { serverError ILLEGAL_INDEX } + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; +-- (*) The search succeeds because the index contains no data (i.e. some shortcut) +-- If it had data (can't really test in SQL tests ...), this statement would also return an error, trust me. + +-- Detach and attach should work. +DETACH TABLE tab; +ATTACH TABLE tab; + +DROP TABLE tab; + +SELECT 'Usearch'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX vec_idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id; + +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -- { serverError ILLEGAL_INDEX } + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; +-- see above: (*) + +-- Detach and attach should work. 
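-- Aside, a speculative migration sketch (not exercised by this test and not guaranteed by this patch):
-- since 'annoy'/'usearch' indexes can no longer be used, one might replace the legacy index on such a
-- table with the new type; vec_idx is the index name from the CREATE TABLE above.
SET allow_experimental_vector_similarity_index = 1;
ALTER TABLE tab DROP INDEX vec_idx;
ALTER TABLE tab ADD INDEX vec_idx vec TYPE vector_similarity('hnsw', 'L2Distance');
ALTER TABLE tab MATERIALIZE INDEX vec_idx;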
+DETACH TABLE tab; +ATTACH TABLE tab; + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql new file mode 100644 index 00000000000..f1cfc041233 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_multiple_indexes.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that multiple vector similarity indexes can be created on the same column (even if that makes no sense) + +SET allow_experimental_vector_similarity_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); + +ALTER TABLE tab ADD INDEX idx(vec) TYPE minmax; +ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance'); +ALTER TABLE tab ADD INDEX vec_idx2(vec) TYPE vector_similarity('hnsw', 'L2Distance'); -- silly but creating the same index also works for non-vector indexes ... + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_queries.reference b/tests/queries/0_stateless/02354_vector_search_queries.reference index 41c1915ecc3..7c8e4c0ca59 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.reference +++ b/tests/queries/0_stateless/02354_vector_search_queries.reference @@ -1,41 +1,14 @@ -ARRAY, 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block -- Annoy: WHERE-type +10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block +- ORDER-BY-type 5 [0,2] 0 6 [0,2.1] 0.09999990463256836 7 [0,2.2] 0.20000004768371582 -- Annoy: ORDER-BY-type -5 [0,2] 0 -6 [0,2.1] 0.09999990463256836 -7 [0,2.2] 0.20000004768371582 -- Usearch: WHERE-type -5 [0,2] 0 -6 [0,2.1] 0.09999990463256836 -7 [0,2.2] 0.20000004768371582 -- Usearch: ORDER-BY-type -5 [0,2] 0 -6 [0,2.1] 0.09999990463256836 -7 [0,2.2] 0.20000004768371582 -- Annoy: WHERE-type, EXPLAIN -Expression ((Projection + Before ORDER BY)) - Limit (preliminary LIMIT (without OFFSET)) - Expression - ReadFromMergeTree (default.tab_annoy) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 1/1 - Skip - Name: idx - Description: annoy GRANULARITY 100000000 - Parts: 1/1 - Granules: 1/1 -- Annoy: ORDER-BY-type, EXPLAIN +- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_annoy) + ReadFromMergeTree (default.tab) Indexes: PrimaryKey Condition: true @@ -43,78 +16,20 @@ Expression (Projection) Granules: 1/1 Skip Name: idx - Description: annoy GRANULARITY 100000000 + Description: vector_similarity GRANULARITY 100000000 Parts: 1/1 Granules: 1/1 -- Usearch: WHERE-type, EXPLAIN -Expression ((Projection + Before ORDER BY)) - Limit (preliminary LIMIT (without OFFSET)) - Expression - ReadFromMergeTree (default.tab_usearch) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 1/1 - Skip - Name: idx - Description: usearch GRANULARITY 100000000 - Parts: 1/1 - Granules: 1/1 -- Usearch: ORDER-BY-type, EXPLAIN +12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block +- ORDER-BY-type +6 [0,2] 0 +7 [0,2.1] 0.09999990463256836 +8 [0,2.2] 
0.20000004768371582 +- ORDER-BY-type, EXPLAIN Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_usearch) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 1/1 - Skip - Name: idx - Description: usearch GRANULARITY 100000000 - Parts: 1/1 - Granules: 1/1 -ARRAY vectors, 12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block -- Annoy: WHERE-type -6 [0,2] 0 -7 [0,2.1] 0.09999990463256836 -8 [0,2.2] 0.20000004768371582 -- Annoy: ORDER-BY-type -6 [0,2] 0 -7 [0,2.1] 0.09999990463256836 -8 [0,2.2] 0.20000004768371582 -- Usearch: WHERE-type -6 [0,2] 0 -7 [0,2.1] 0.09999990463256836 -8 [0,2.2] 0.20000004768371582 -- Usearch: ORDER-BY-type -6 [0,2] 0 -7 [0,2.1] 0.09999990463256836 -8 [0,2.2] 0.20000004768371582 -- Annoy: WHERE-type, EXPLAIN -Expression ((Projection + Before ORDER BY)) - Limit (preliminary LIMIT (without OFFSET)) - Expression - ReadFromMergeTree (default.tab_annoy) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 4/4 - Skip - Name: idx - Description: annoy GRANULARITY 2 - Parts: 1/1 - Granules: 1/4 -- Annoy: ORDER-BY-type, EXPLAIN -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_annoy) + ReadFromMergeTree (default.tab) Indexes: PrimaryKey Condition: true @@ -122,77 +37,20 @@ Expression (Projection) Granules: 4/4 Skip Name: idx - Description: annoy GRANULARITY 2 + Description: vector_similarity GRANULARITY 2 Parts: 1/1 Granules: 2/4 -- Usearch: WHERE-type, EXPLAIN -Expression ((Projection + Before ORDER BY)) - Limit (preliminary LIMIT (without OFFSET)) - Expression - ReadFromMergeTree (default.tab_usearch) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 4/4 - Skip - Name: idx - Description: usearch GRANULARITY 2 - Parts: 1/1 - Granules: 1/4 -- Usearch: ORDER-BY-type, EXPLAIN -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting (Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_usearch) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 4/4 - Skip - Name: idx - Description: usearch GRANULARITY 2 - Parts: 1/1 - Granules: 2/4 -TUPLE vectors and special cases -- Annoy: WHERE-type -0 (4.6,2.3) 0.5527864045000421 -1 (2,3.2) 0.15200169244542905 -2 (4.2,3.4) 0.37080174340866845 -- Annoy: ORDER-BY-type -6 (1,9.3) 0.005731362878640178 -1 (2,3.2) 0.15200169244542905 -7 (5.5,4.7) 0.3503476876550442 -- Usearch: WHERE-type -0 (4.6,2.3) 0.5527864045000421 -1 (2,3.2) 0.15200169244542905 -2 (4.2,3.4) 0.37080174340866845 -- Usearch: ORDER-BY-type -6 (1,9.3) 0.005731362878640178 -1 (2,3.2) 0.15200169244542905 -7 (5.5,4.7) 0.3503476876550442 -- Special case: MaximumDistance is negative -- Special case: MaximumDistance is negative -- Special case: setting "annoy_index_search_k_nodes" +Special cases +- ORDER-BY-type +6 [1,9.3] 0.005731362878640178 +1 [2,3.2] 0.15200169244542905 +7 [5.5,4.7] 0.3503476876550442 - Special case: setting "max_limit_for_ann_queries" Expression (Projection) Limit (preliminary LIMIT (without OFFSET)) Sorting (Sorting for ORDER BY) Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_annoy) - Indexes: - PrimaryKey - Condition: true - Parts: 1/1 - Granules: 4/4 -- Special case: setting "max_limit_for_ann_queries" -Expression (Projection) - Limit (preliminary LIMIT (without OFFSET)) - Sorting 
(Sorting for ORDER BY) - Expression (Before ORDER BY) - ReadFromMergeTree (default.tab_usearch) + ReadFromMergeTree (default.tab) Indexes: PrimaryKey Condition: true diff --git a/tests/queries/0_stateless/02354_vector_search_queries.sql b/tests/queries/0_stateless/02354_vector_search_queries.sql index 64051aa8544..dbf0fca32ab 100644 --- a/tests/queries/0_stateless/02354_vector_search_queries.sql +++ b/tests/queries/0_stateless/02354_vector_search_queries.sql @@ -1,238 +1,80 @@ --- Tags: no-fasttest, no-ubsan, no-cpu-aarch64, no-ordinary-database, no-asan +-- Tags: no-fasttest, no-ordinary-database --- Tests vector search in ClickHouse, i.e. Annoy and Usearch indexes. Both index types share similarities in implementation and usage, --- therefore they are tested in a single file. +-- Tests various simple approximate nearest neighborhood (ANN) queries that utilize vector search indexes. --- This file tests various simple approximate nearest neighborhood (ANN) queries that utilize vector search indexes. +SET allow_experimental_vector_similarity_index = 1; -SET allow_experimental_annoy_index = 1; -SET allow_experimental_usearch_index = 1; +SET enable_analyzer = 0; -SELECT 'ARRAY, 10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block'; +SELECT '10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block'; -DROP TABLE IF EXISTS tab_annoy; -DROP TABLE IF EXISTS tab_usearch; +DROP TABLE IF EXISTS tab; -CREATE TABLE tab_annoy(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; -INSERT INTO tab_annoy VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); - -CREATE TABLE tab_usearch(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch()) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; -INSERT INTO tab_usearch VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance')) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); -SELECT '- Annoy: WHERE-type'; +SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Annoy: ORDER-BY-type'; -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy +FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- Usearch: WHERE-type'; +SELECT '- ORDER-BY-type, EXPLAIN'; +EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Usearch: ORDER-BY-type'; -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch +FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- Annoy: WHERE-type, EXPLAIN'; -EXPLAIN indexes=1 -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy 
-WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; +DROP TABLE tab; -SELECT '- Annoy: ORDER-BY-type, EXPLAIN'; -EXPLAIN indexes=1 + +SELECT '12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy +FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -SELECT '- Usearch: WHERE-type, EXPLAIN'; -EXPLAIN indexes=1 +SELECT '- ORDER-BY-type, EXPLAIN'; +EXPLAIN indexes = 1 WITH [0.0, 2.0] AS reference_vec SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Usearch: ORDER-BY-type, EXPLAIN'; -EXPLAIN indexes=1 -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch +FROM tab ORDER BY L2Distance(vec, reference_vec) LIMIT 3; -DROP TABLE tab_annoy; -DROP TABLE tab_usearch; +DROP TABLE tab; -SELECT 'ARRAY vectors, 12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block'; +SELECT 'Special cases'; -- Not a systematic test, just to check that no bad things happen. +-- Test with non-default metric, M, ef_construction, ef_search -CREATE TABLE tab_annoy(id Int32, vec Array(Float32), INDEX idx vec TYPE annoy() GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab_annoy VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 'f32', 42, 99, 66) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); -CREATE TABLE tab_usearch(id Int32, vec Array(Float32), INDEX idx vec TYPE usearch() GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab_usearch VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); - -SELECT '- Annoy: WHERE-type'; +SELECT '- ORDER-BY-type'; WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Annoy: ORDER-BY-type'; -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; - -SELECT '- Usearch: WHERE-type'; -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Usearch: ORDER-BY-type'; -WITH [0.0, 2.0] AS reference_vec -SELECT id, 
vec, L2Distance(vec, reference_vec) -FROM tab_usearch -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; - -SELECT '- Annoy: WHERE-type, EXPLAIN'; -EXPLAIN indexes=1 -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Annoy: ORDER-BY-type, EXPLAIN'; -EXPLAIN indexes=1 -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_annoy -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; - -SELECT '- Usearch: WHERE-type, EXPLAIN'; -EXPLAIN indexes=1 -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch -WHERE L2Distance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Usearch: ORDER-BY-type, EXPLAIN'; -EXPLAIN indexes=1 -WITH [0.0, 2.0] AS reference_vec -SELECT id, vec, L2Distance(vec, reference_vec) -FROM tab_usearch -ORDER BY L2Distance(vec, reference_vec) -LIMIT 3; - -DROP TABLE tab_annoy; -DROP TABLE tab_usearch; - - -SELECT 'TUPLE vectors and special cases'; --- Not a systematic test, just to check that no bad things happen. --- Just for jun, use metric = 'cosineDistance' (Annoy/Usearch), tree_count = 200 (Annoy), scalarKind = 'f64' (Usearch) - -CREATE TABLE tab_annoy(id Int32, vec Tuple(Float32, Float32), INDEX idx vec TYPE annoy('cosineDistance', 200) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab_annoy VALUES (0, (4.6, 2.3)), (1, (2.0, 3.2)), (2, (4.2, 3.4)), (3, (5.3, 2.9)), (4, (2.4, 5.2)), (5, (5.3, 2.3)), (6, (1.0, 9.3)), (7, (5.5, 4.7)), (8, (6.4, 3.5)), (9, (5.3, 2.5)), (10, (6.4, 3.4)), (11, (6.4, 3.2)); - -CREATE TABLE tab_usearch(id Int32, vec Tuple(Float32, Float32), INDEX idx vec TYPE usearch('cosineDistance', 'f64') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; -INSERT INTO tab_usearch VALUES (0, (4.6, 2.3)), (1, (2.0, 3.2)), (2, (4.2, 3.4)), (3, (5.3, 2.9)), (4, (2.4, 5.2)), (5, (5.3, 2.3)), (6, (1.0, 9.3)), (7, (5.5, 4.7)), (8, (6.4, 3.5)), (9, (5.3, 2.5)), (10, (6.4, 3.4)), (11, (6.4, 3.2)); - -SELECT '- Annoy: WHERE-type'; -WITH (0.0, 2.0) AS reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_annoy -WHERE cosineDistance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Annoy: ORDER-BY-type'; -WITH (0.0, 2.0) AS reference_vec -SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_annoy +FROM tab ORDER BY cosineDistance(vec, reference_vec) LIMIT 3; -SELECT '- Usearch: WHERE-type'; -WITH (0.0, 2.0) AS reference_vec -SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_usearch -WHERE cosineDistance(vec, reference_vec) < 1.0 -LIMIT 3; - -SELECT '- Usearch: ORDER-BY-type'; -WITH (0.0, 2.0) AS reference_vec -SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_usearch -ORDER BY cosineDistance(vec, reference_vec) -LIMIT 3; - -SELECT '- Special case: MaximumDistance is negative'; -WITH (0.0, 2.0) as reference_vec -SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_annoy -WHERE cosineDistance(vec, reference_vec) < -1.0 -LIMIT 3; -- { serverError INCORRECT_QUERY } - -SELECT '- Special case: MaximumDistance is negative'; -WITH (0.0, 2.0) as reference_vec -SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_usearch -WHERE cosineDistance(vec, reference_vec) < -1.0 -LIMIT 3; -- { serverError INCORRECT_QUERY } - -SELECT '- Special case: setting "annoy_index_search_k_nodes"'; -WITH (0.0, 2.0) as reference_vec -SELECT id, vec, cosineDistance(vec, 
reference_vec) -FROM tab_annoy -ORDER BY cosineDistance(vec, reference_vec) -LIMIT 3 -SETTINGS annoy_index_search_k_nodes=0; -- searches zero nodes --> no results - SELECT '- Special case: setting "max_limit_for_ann_queries"'; EXPLAIN indexes=1 -WITH (0.0, 2.0) as reference_vec +WITH [0.0, 2.0] as reference_vec SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_annoy +FROM tab ORDER BY cosineDistance(vec, reference_vec) LIMIT 3 -SETTINGS max_limit_for_ann_queries=2; -- LIMIT 3 > 2 --> don't use the ann index +SETTINGS max_limit_for_ann_queries = 2; -- LIMIT 3 > 2 --> don't use the ann index -SELECT '- Special case: setting "max_limit_for_ann_queries"'; -EXPLAIN indexes=1 -WITH (0.0, 2.0) as reference_vec -SELECT id, vec, cosineDistance(vec, reference_vec) -FROM tab_usearch -ORDER BY cosineDistance(vec, reference_vec) -LIMIT 3 -SETTINGS max_limit_for_ann_queries=2; -- LIMIT 3 > 2 --> don't use the ann index - -DROP TABLE tab_annoy; -DROP TABLE tab_usearch; +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.reference b/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql b/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql new file mode 100644 index 00000000000..da6494bf831 --- /dev/null +++ b/tests/queries/0_stateless/02354_vector_search_unquoted_index_parameters.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET allow_experimental_vector_similarity_index = 1; + +-- Tests that quoted and unquoted parameters can be passed to vector search indexes. + +DROP TABLE IF EXISTS tab1; +DROP TABLE IF EXISTS tab2; + +CREATE TABLE tab1 (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance')); +CREATE TABLE tab2 (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity(hnsw, L2Distance)); + +DROP TABLE tab1; +DROP TABLE tab2; + +CREATE TABLE tab1 (id Int32, vec Array(Float32), PRIMARY KEY id); +CREATE TABLE tab2 (id Int32, vec Array(Float32), PRIMARY KEY id); + +ALTER TABLE tab1 ADD INDEX idx1(vec) TYPE vector_similarity('hnsw', 'L2Distance'); +ALTER TABLE tab2 ADD INDEX idx2(vec) TYPE vector_similarity(hnsw, L2Distance); + +DROP TABLE tab1; +DROP TABLE tab2; diff --git a/tests/queries/0_stateless/02378_part_log_profile_events.sql b/tests/queries/0_stateless/02378_part_log_profile_events.sql index 38d3f8b4c05..eec76d6f50e 100644 --- a/tests/queries/0_stateless/02378_part_log_profile_events.sql +++ b/tests/queries/0_stateless/02378_part_log_profile_events.sql @@ -39,7 +39,7 @@ SYSTEM FLUSH LOGS; SELECT if(count() == 2, 'Ok', 'Error: ' || toString(count())), - if(SUM(ProfileEvents['MergedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MergedRows']))), + if(SUM(ProfileEvents['MutatedRows']) == 512, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['MutatedRows']))), if(SUM(ProfileEvents['FileOpen']) > 1, 'Ok', 'Error: ' || toString(SUM(ProfileEvents['FileOpen']))) FROM system.part_log WHERE event_time > now() - INTERVAL 10 MINUTE diff --git a/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh b/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh index 98b571c5968..91e85eabcb8 100755 --- a/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh +++ 
b/tests/queries/0_stateless/02421_simple_queries_for_opentelemetry.sh @@ -9,7 +9,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # $2 - query function execute_query() { - ${CLICKHOUSE_CLIENT} --opentelemetry_start_trace_probability=1 --query_id $1 -nq " + ${CLICKHOUSE_CLIENT} --opentelemetry_start_trace_probability=1 --query_id $1 -q " ${2} " } @@ -18,7 +18,7 @@ function execute_query() # so we only to check the db.statement only function check_query_span_query_only() { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; SELECT attribute['db.statement'] as query FROM system.opentelemetry_span_log @@ -31,7 +31,7 @@ ${CLICKHOUSE_CLIENT} -nq " function check_query_span() { -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; SELECT attribute['db.statement'] as query, attribute['clickhouse.read_rows'] as read_rows, @@ -47,7 +47,7 @@ ${CLICKHOUSE_CLIENT} -nq " # # Set up # -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.opentelemetry_test; CREATE TABLE ${CLICKHOUSE_DATABASE}.opentelemetry_test (id UInt64) Engine=MergeTree Order By id; " @@ -79,4 +79,4 @@ check_query_span $query_id # ${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS ${CLICKHOUSE_DATABASE}.opentelemetry_test; -" \ No newline at end of file +" diff --git a/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh b/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh index ae3ea017fbb..178da822d41 100755 --- a/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh +++ b/tests/queries/0_stateless/02458_insert_select_progress_tcp.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists insert_select_progress_tcp; create table insert_select_progress_tcp(s UInt16) engine = MergeTree order by s; " diff --git a/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh b/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh index 4c850a6ec9e..92f519a9f8a 100755 --- a/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh +++ b/tests/queries/0_stateless/02476_analyzer_identifier_hints.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS test_table; CREATE TABLE test_table ( @@ -74,7 +74,7 @@ $CLICKHOUSE_CLIENT -q "SELECT 1 AS constant_value, arrayMap(lambda_argument -> l $CLICKHOUSE_CLIENT -q "WITH 1 AS constant_value SELECT (SELECT constant_valu) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS test_table_compound; CREATE TABLE test_table_compound ( @@ -142,7 +142,7 @@ $CLICKHOUSE_CLIENT -q "SELECT cast(tuple(1), 'Tuple(value_1 String)') AS constan $CLICKHOUSE_CLIENT -q "WITH cast(tuple(1), 'Tuple(value_1 String)') AS constant_value SELECT (SELECT constant_value.value_) SETTINGS enable_analyzer = 1;" 2>&1 \ | grep "Maybe you meant: \['constant_value.value_1'\]" &>/dev/null; -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS test_table_1; CREATE TABLE test_table_1 ( @@ -185,7 +185,7 @@ $CLICKHOUSE_CLIENT -q "SELECT ((1))::Tuple(a Tuple(b UInt32)) AS t, t.a.c SETTIN $CLICKHOUSE_CLIENT -q "SELECT 1"; -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE test_table; DROP TABLE test_table_compound; DROP TABLE test_table_1; diff --git a/tests/queries/0_stateless/02480_tets_show_full.sh b/tests/queries/0_stateless/02480_tets_show_full.sh index 5f5040ba128..50184857a1f 100755 --- a/tests/queries/0_stateless/02480_tets_show_full.sh +++ b/tests/queries/0_stateless/02480_tets_show_full.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) database=$($CLICKHOUSE_CLIENT -q 'SELECT currentDatabase()') -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS test_02480_table; DROP VIEW IF EXISTS test_02480_view; CREATE TABLE test_02480_table (id Int64) ENGINE=MergeTree ORDER BY id; diff --git a/tests/queries/0_stateless/02482_load_parts_refcounts.sh b/tests/queries/0_stateless/02482_load_parts_refcounts.sh index 5303824d97c..4dc7a7fd99b 100755 --- a/tests/queries/0_stateless/02482_load_parts_refcounts.sh +++ b/tests/queries/0_stateless/02482_load_parts_refcounts.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP TABLE IF EXISTS load_parts_refcounts SYNC; CREATE TABLE load_parts_refcounts (id UInt32) diff --git a/tests/queries/0_stateless/02494_query_cache_tag.reference b/tests/queries/0_stateless/02494_query_cache_tag.reference new file mode 100644 index 00000000000..f7be5c06ecf --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_tag.reference @@ -0,0 +1,12 @@ +1 +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = \'abc\' abc +--- +1 +1 +SELECT 1 SETTINGS use_query_cache = true +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = \'abc\' abc +--- +1 +1 +SELECT 1 SETTINGS use_query_cache = true abc +SELECT 1 SETTINGS use_query_cache = true def diff --git a/tests/queries/0_stateless/02494_query_cache_tag.sql b/tests/queries/0_stateless/02494_query_cache_tag.sql new file mode 100644 index 00000000000..62d36f6ebe6 --- /dev/null +++ b/tests/queries/0_stateless/02494_query_cache_tag.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Store the result a single query with a tag in the query cache and check that the system table knows about the tag +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; + +SELECT query, tag FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Store the result of the same query with two different tags. The cache should store two entries. +SELECT 1 SETTINGS use_query_cache = true; -- default query_cache_tag = '' +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; +SELECT query, tag FROM system.query_cache ORDER BY ALL; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Like before but the tag is set standalone. 
+ +SET query_cache_tag = 'abc'; +SELECT 1 SETTINGS use_query_cache = true; + +SET query_cache_tag = 'def'; +SELECT 1 SETTINGS use_query_cache = true; + +SELECT query, tag FROM system.query_cache ORDER BY ALL; + +SYSTEM DROP QUERY CACHE; diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference index 77ef213b36d..4d004f2f78f 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.reference @@ -465,6 +465,37 @@ Expression ((Projection + Before ORDER BY)) ReadFromStorage (SystemOne) -- execute Float64 9007199254740994 +-- presence of an inner OFFSET retains the ORDER BY +-- query +WITH + t1 AS ( + SELECT a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2 +-- explain +Expression ((Projection + Before ORDER BY)) + Aggregating + Expression (Before GROUP BY) + Offset + Expression (Projection) + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + Before ORDER BY))) + ReadFromStorage (Values) +-- execute +0 -- disable common optimization to avoid functions to be lifted up (liftUpFunctions optimization), needed for testing with stateful function -- neighbor() as stateful function prevents removing inner ORDER BY since its result depends on order -- query diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh index 646e2501a99..6e132c55628 100755 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting.sh +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting.sh @@ -26,15 +26,15 @@ FROM ORDER BY number DESC ) ORDER BY number ASC" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;EXPLAIN $query" +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;EXPLAIN $query" function run_query { echo "-- query" echo "$1" echo "-- explain" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;EXPLAIN $1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;EXPLAIN $1" echo "-- execute" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;$1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;$1" } echo "-- Enabled query_plan_remove_redundant_sorting" @@ -302,6 +302,27 @@ FROM )" run_query "$query" +echo "-- presence of an inner OFFSET retains the ORDER BY" +query="WITH + t1 AS ( + SELECT a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2" +run_query "$query" + echo "-- disable common optimization to avoid functions to be lifted up (liftUpFunctions optimization), needed for testing with stateful function" ENABLE_OPTIMIZATION="SET query_plan_enable_optimizations=0;$ENABLE_OPTIMIZATION" echo "-- neighbor() as stateful function prevents removing inner ORDER BY since its result depends on order" diff --git a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference index b6a2e3182df..dd5ac7bf706 100644 --- a/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference +++ b/tests/queries/0_stateless/02496_remove_redundant_sorting_analyzer.reference @@ -464,6 +464,36 @@ Expression ((Project names + Projection)) ReadFromStorage (SystemOne) -- execute Float64 9007199254740994 +-- presence of an inner OFFSET retains the ORDER BY +-- query 
+WITH + t1 AS ( + SELECT a, b + FROM + VALUES ( + 'b UInt32, a Int32', + (1, 1), + (2, 0) + ) + ) +SELECT + SUM(a) +FROM ( + SELECT a, b + FROM t1 + ORDER BY 1 DESC, 2 + OFFSET 1 +) t2 +-- explain +Expression ((Project names + Projection)) + Aggregating + Expression ((Before GROUP BY + (Change column names to column identifiers + Project names))) + Offset + Sorting (Sorting for ORDER BY) + Expression ((Before ORDER BY + (Projection + (Change column names to column identifiers + (Project names + (Projection + Change column names to column identifiers)))))) + ReadFromStorage (Values) +-- execute +0 -- disable common optimization to avoid functions to be lifted up (liftUpFunctions optimization), needed for testing with stateful function -- neighbor() as stateful function prevents removing inner ORDER BY since its result depends on order -- query diff --git a/tests/queries/0_stateless/02497_storage_file_reader_selection.sh b/tests/queries/0_stateless/02497_storage_file_reader_selection.sh index aa43e81f131..27243dd47fa 100755 --- a/tests/queries/0_stateless/02497_storage_file_reader_selection.sh +++ b/tests/queries/0_stateless/02497_storage_file_reader_selection.sh @@ -13,6 +13,6 @@ $CLICKHOUSE_LOCAL --storage_file_read_method=mmap --print-profile-events -q "SEL $CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -q "CreatedReadBufferMMap" && echo 'Fail' || echo 0 $CLICKHOUSE_LOCAL --storage_file_read_method=pread --print-profile-events -q "SELECT * FROM file($DATA_FILE) FORMAT Null" 2>&1 | grep -F -q "CreatedReadBufferOrdinary" && echo 1 || echo 'Fail' -$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -nq "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }" +$CLICKHOUSE_CLIENT --storage_file_read_method=mmap -q "SELECT * FROM file('/dev/null', 'LineAsString') FORMAT Null -- { serverError BAD_ARGUMENTS }" rm $DATA_FILE diff --git a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh index 3c06119e8d2..6fd42fa940a 100755 --- a/tests/queries/0_stateless/02500_remove_redundant_distinct.sh +++ b/tests/queries/0_stateless/02500_remove_redundant_distinct.sh @@ -24,15 +24,15 @@ FROM ) )" -$CLICKHOUSE_CLIENT -nq "$DISABLE_OPTIMIZATION;EXPLAIN $query" +$CLICKHOUSE_CLIENT -q "$DISABLE_OPTIMIZATION;EXPLAIN $query" function run_query { echo "-- query" echo "$1" echo "-- explain" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;EXPLAIN $1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;EXPLAIN $1" echo "-- execute" - $CLICKHOUSE_CLIENT -nq "$ENABLE_OPTIMIZATION;$1" + $CLICKHOUSE_CLIENT -q "$ENABLE_OPTIMIZATION;$1" } echo "-- Enabled $OPTIMIZATION_SETTING" diff --git a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh index a0f228e6af4..c1aa24943c1 100755 --- a/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh +++ b/tests/queries/0_stateless/02514_database_replicated_no_arguments_for_rmt.sh @@ -14,8 +14,8 @@ ${CLICKHOUSE_CLIENT} -q "CREATE USER user_${CLICKHOUSE_DATABASE} settings databa ${CLICKHOUSE_CLIENT} -q "GRANT CREATE TABLE ON ${CLICKHOUSE_DATABASE}_db.* TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "GRANT TABLE ENGINE ON ReplicatedMergeTree TO user_${CLICKHOUSE_DATABASE}" ${CLICKHOUSE_CLIENT} -q "CREATE DATABASE ${CLICKHOUSE_DATABASE}_db engine = 
Replicated('/clickhouse/databases/${CLICKHOUSE_TEST_ZOOKEEPER_PREFIX}/${CLICKHOUSE_DATABASE}_db', '{shard}', '{replica}')" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;" -${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" -n --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_ok (x UInt32) engine = ReplicatedMergeTree order by x;" +${CLICKHOUSE_CLIENT} --distributed_ddl_output_mode=none --user "user_${CLICKHOUSE_DATABASE}" --query "CREATE TABLE ${CLICKHOUSE_DATABASE}_db.tab_rmt_fail (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/$CLICKHOUSE_TEST_ZOOKEEPER_PREFIX/root/{shard}', '{replica}') order by x; -- { serverError 80 }" ${CLICKHOUSE_CLIENT} --query "DROP DATABASE ${CLICKHOUSE_DATABASE}_db" ${CLICKHOUSE_CLIENT} -q "DROP USER user_${CLICKHOUSE_DATABASE}" diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.reference b/tests/queries/0_stateless/02532_send_logs_level_test.reference index 72f4ea06184..e69de29bb2d 100644 --- a/tests/queries/0_stateless/02532_send_logs_level_test.reference +++ b/tests/queries/0_stateless/02532_send_logs_level_test.reference @@ -1,4 +0,0 @@ - MergeTreeReadPoolBase: Will use min_marks_per_task=24 - MergeTreeMarksLoader: Loading marks from path data.cmrk3 - MergeTreeRangeReader: First reader returned: num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), requested columns: key - MergeTreeRangeReader: read() returned num_rows: 1, columns: 1, total_rows_per_granule: 1, no filter, column[0]: Int32(size = 1), sample block key diff --git a/tests/queries/0_stateless/02532_send_logs_level_test.sh b/tests/queries/0_stateless/02532_send_logs_level_test.sh index 506ac2331f2..a91e49ddd22 100755 --- a/tests/queries/0_stateless/02532_send_logs_level_test.sh +++ b/tests/queries/0_stateless/02532_send_logs_level_test.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key Int) engine=MergeTree order by tuple() settings min_bytes_for_wide_part = '1G', compress_marks = 1; insert into data values (1); @@ -18,6 +18,10 @@ $CLICKHOUSE_CLIENT -nm -q " # instead of "last" value, hence you cannot simply append another # --send_logs_level here. CLICKHOUSE_CLIENT_CLEAN=$(echo ${CLICKHOUSE_CLIENT} | sed 's/'"--send_logs_level=${CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL}"'/--send_logs_level=test/g') -$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -o -e '.*' -e '.*' -$CLICKHOUSE_CLIENT -q "drop table data" +set -e + +trap '$CLICKHOUSE_CLIENT -q "drop table data"' EXIT + +$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& (! 
grep -q -o -e '.*') +$CLICKHOUSE_CLIENT_CLEAN -q "select * from data SETTINGS merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0;" |& grep -q -o -e '.*' diff --git a/tests/queries/0_stateless/02555_davengers_rename_chain.sh b/tests/queries/0_stateless/02555_davengers_rename_chain.sh index 660a95846c4..196507dc72e 100755 --- a/tests/queries/0_stateless/02555_davengers_rename_chain.sh +++ b/tests/queries/0_stateless/02555_davengers_rename_chain.sh @@ -46,7 +46,7 @@ tables["wrong_metadata_compact"]="min_bytes_for_wide_part = 10000000" for table in "${!tables[@]}"; do settings="${tables[$table]}" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS $table; CREATE TABLE $table( @@ -69,7 +69,7 @@ for table in "${!tables[@]}"; do wait_column "$table" "\`a1\` UInt64" || exit 2 - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" -- { echoOn } SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null; @@ -82,7 +82,7 @@ for table in "${!tables[@]}"; do wait_mutation_loaded "$table" "b1 TO a" || exit 2 - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" -- { echoOn } SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null; @@ -94,7 +94,7 @@ for table in "${!tables[@]}"; do wait_for_all_mutations "$table" - $CLICKHOUSE_CLIENT -n --query=" + $CLICKHOUSE_CLIENT --query=" -- { echoOn } SELECT 'ECHO_ALIGNMENT_FIX' FORMAT Null; diff --git a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh index 509cd03f6c2..22b94e09b58 100755 --- a/tests/queries/0_stateless/02572_query_views_log_background_thread.sh +++ b/tests/queries/0_stateless/02572_query_views_log_background_thread.sh @@ -8,7 +8,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "drop table if exists buffer_02572; +${CLICKHOUSE_CLIENT} --ignore-error --query "drop table if exists buffer_02572; drop table if exists data_02572; drop table if exists copy_02572; drop table if exists mv_02572;" ${CLICKHOUSE_CLIENT} --query="create table copy_02572 (key Int) engine=Memory();" @@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} --query="insert into buffer_02572 values (1);" if [ $(( $(date +%s) - start )) -gt 6 ]; then # clickhouse test cluster is overloaded, will skip # ensure that the flush was not direct - ${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" + ${CLICKHOUSE_CLIENT} --ignore-error --query "select * from data_02572; select * from copy_02572;" fi # we cannot use OPTIMIZE, this will attach query context, so let's wait @@ -31,11 +31,11 @@ for _ in {1..100}; do done -${CLICKHOUSE_CLIENT} --ignore-error --multiquery --query "select * from data_02572; select * from copy_02572;" +${CLICKHOUSE_CLIENT} --ignore-error --query "select * from data_02572; select * from copy_02572;" ${CLICKHOUSE_CLIENT} --query="system flush logs;" ${CLICKHOUSE_CLIENT} --query="select count() > 0, lower(status::String), errorCodeToName(exception_code) from system.query_views_log where view_name = concatWithSeparator('.', currentDatabase(), 'mv_02572') and view_target = concatWithSeparator('.', currentDatabase(), 'copy_02572') - group by 2, 3;" \ No newline at end of file + group by 2, 3;" diff --git a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh index 03e0f363d71..79253648475 100755 --- a/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_read_bandwidth.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9; " @@ -26,7 +26,7 @@ read_methods=( for read_method in "${read_methods[@]}"; do query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "select * from data format Null settings max_local_read_bandwidth='1M', local_filesystem_read_method='$read_method'" - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT '$read_method', diff --git a/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh b/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh index 4f6a300c5b3..c5776134673 100755 --- a/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh +++ b/tests/queries/0_stateless/02703_max_local_write_bandwidth.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9; " @@ -13,7 +13,7 @@ $CLICKHOUSE_CLIENT -nm -q " query_id=$(random_str 10) # writes 1e6*8 bytes with 1M bandwith it should take (8-1)/1=7 seconds $CLICKHOUSE_CLIENT --query_id "$query_id" -q "insert into data select * from numbers(1e6) settings max_local_write_bandwidth='1M'" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query_duration_ms >= 7e3, diff --git a/tests/queries/0_stateless/02704_max_backup_bandwidth.sh b/tests/queries/0_stateless/02704_max_backup_bandwidth.sh index 8cb03a93a7a..7e914c4c539 100755 --- a/tests/queries/0_stateless/02704_max_backup_bandwidth.sh +++ b/tests/queries/0_stateless/02704_max_backup_bandwidth.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists data; create table data (key UInt64 CODEC(NONE)) engine=MergeTree() order by tuple() settings min_bytes_for_wide_part=1e9; " @@ -15,7 +15,7 @@ $CLICKHOUSE_CLIENT -q "insert into data select * from numbers(1e6)" query_id=$(random_str 10) $CLICKHOUSE_CLIENT --query_id "$query_id" -q "backup table data to Disk('backups', '$CLICKHOUSE_DATABASE/data/backup1')" --max_backup_bandwidth=1M > /dev/null -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " SYSTEM FLUSH LOGS; SELECT query_duration_ms >= 7e3, diff --git a/tests/queries/0_stateless/02724_limit_num_mutations.sh b/tests/queries/0_stateless/02724_limit_num_mutations.sh index 60888db0e2e..604cc9ff08e 100755 --- a/tests/queries/0_stateless/02724_limit_num_mutations.sh +++ b/tests/queries/0_stateless/02724_limit_num_mutations.sh @@ -23,7 +23,7 @@ function wait_for_alter() done } -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t_limit_mutations SYNC; CREATE TABLE t_limit_mutations (id UInt64, v UInt64) @@ -48,14 +48,14 @@ SELECT count() FROM system.mutations WHERE database = currentDatabase() AND tabl SHOW CREATE TABLE t_limit_mutations; " -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " ALTER TABLE t_limit_mutations UPDATE v = 6 WHERE 1 SETTINGS number_of_mutations_to_throw = 100; ALTER TABLE t_limit_mutations MODIFY COLUMN v String SETTINGS number_of_mutations_to_throw = 100, alter_sync = 0; " wait_for_alter "String" -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t_limit_mutations ORDER BY id; SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_limit_mutations' AND NOT is_done; SHOW CREATE TABLE t_limit_mutations; @@ -65,7 +65,7 @@ ${CLICKHOUSE_CLIENT} --query "SYSTEM START MERGES t_limit_mutations" wait_for_mutation "t_limit_mutations" "0000000003" -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT * FROM t_limit_mutations ORDER BY id; SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_limit_mutations' AND NOT is_done; SHOW CREATE TABLE t_limit_mutations; diff --git a/tests/queries/0_stateless/02725_async_insert_table_setting.sh b/tests/queries/0_stateless/02725_async_insert_table_setting.sh index 13911e8d677..14c2d335275 100755 --- a/tests/queries/0_stateless/02725_async_insert_table_setting.sh +++ 
b/tests/queries/0_stateless/02725_async_insert_table_setting.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " DROP TABLE IF EXISTS t_mt_async_insert; DROP TABLE IF EXISTS t_mt_sync_insert; @@ -19,7 +19,7 @@ url="${CLICKHOUSE_URL}&async_insert=0&wait_for_async_insert=1" ${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_async_insert VALUES (1, 'aa'), (2, 'bb')" ${CLICKHOUSE_CURL} -sS "$url" -d "INSERT INTO t_mt_sync_insert VALUES (1, 'aa'), (2, 'bb')" -${CLICKHOUSE_CLIENT} -n --query " +${CLICKHOUSE_CLIENT} --query " SELECT count() FROM t_mt_async_insert; SELECT count() FROM t_mt_sync_insert; diff --git a/tests/queries/0_stateless/02726_async_insert_flush_queue.sql b/tests/queries/0_stateless/02726_async_insert_flush_queue.sql index 97d644fa4d6..5d941adcb81 100644 --- a/tests/queries/0_stateless/02726_async_insert_flush_queue.sql +++ b/tests/queries/0_stateless/02726_async_insert_flush_queue.sql @@ -30,7 +30,9 @@ SELECT count() FROM t_async_inserts_flush; SYSTEM FLUSH ASYNC INSERT QUEUE; -SELECT count() FROM system.asynchronous_inserts; +SELECT count() FROM system.asynchronous_inserts +WHERE database = currentDatabase() AND table = 't_async_inserts_flush'; + SELECT count() FROM t_async_inserts_flush; DROP TABLE t_async_inserts_flush; diff --git a/tests/queries/0_stateless/02726_async_insert_flush_stress.sh b/tests/queries/0_stateless/02726_async_insert_flush_stress.sh index 876766d0780..61bbbd620f0 100755 --- a/tests/queries/0_stateless/02726_async_insert_flush_stress.sh +++ b/tests/queries/0_stateless/02726_async_insert_flush_stress.sh @@ -91,5 +91,5 @@ flush1 $TIMEOUT & wait ${CLICKHOUSE_CLIENT} -q "SYSTEM FLUSH ASYNC INSERT QUEUE" -${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.asynchronous_inserts" +${CLICKHOUSE_CLIENT} -q "SELECT count() FROM system.asynchronous_inserts WHERE database = currentDatabase() AND table = 'async_inserts'" ${CLICKHOUSE_CLIENT} -q "DROP TABLE IF EXISTS async_inserts"; diff --git a/tests/queries/0_stateless/02792_drop_projection_lwd.sql b/tests/queries/0_stateless/02792_drop_projection_lwd.sql index dcde7dcc600..dad7f7cd028 100644 --- a/tests/queries/0_stateless/02792_drop_projection_lwd.sql +++ b/tests/queries/0_stateless/02792_drop_projection_lwd.sql @@ -7,7 +7,7 @@ CREATE TABLE t_projections_lwd (a UInt32, b UInt32, PROJECTION p (SELECT * ORDER INSERT INTO t_projections_lwd SELECT number, number FROM numbers(100); -- LWD does not work, as expected -DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError NOT_IMPLEMENTED } +DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError SUPPORT_IS_DISABLED } KILL MUTATION WHERE database = currentDatabase() AND table = 't_projections_lwd' SYNC FORMAT Null; -- drop projection diff --git a/tests/queries/0_stateless/02841_local_assert.sh b/tests/queries/0_stateless/02841_local_assert.sh index a167c09da1f..dc49007b0f6 100755 --- a/tests/queries/0_stateless/02841_local_assert.sh +++ b/tests/queries/0_stateless/02841_local_assert.sh @@ -7,12 +7,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh echo "create table test (x UInt64) engine=Memory; -insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}" | $CLICKHOUSE_LOCAL -nm +insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS}" | $CLICKHOUSE_LOCAL -m echo "create table test (x UInt64) engine=Memory; -insert into test from infile 'data';" | $CLICKHOUSE_LOCAL -nm --ignore-error +insert into test from infile 'data';" | $CLICKHOUSE_LOCAL -m --ignore-error echo "create table test (x UInt64) engine=Memory; insert into test from infile 'data'; -- {clientError BAD_ARGUMENTS} -select 1" | $CLICKHOUSE_LOCAL -nm +select 1" | $CLICKHOUSE_LOCAL -m diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference b/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference deleted file mode 100644 index 02c41656a36..00000000000 --- a/tests/queries/0_stateless/02864_statistics_count_min_sketch.reference +++ /dev/null @@ -1,14 +0,0 @@ -CREATE TABLE default.tab\n(\n `a` String,\n `b` UInt64,\n `c` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -Test statistics count_min: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), equals(b, 0), equals(c, 0)) (removed) -Test statistics multi-types: - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'0\'), less(c, -90), greater(b, 900)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(a, \'10000\'), equals(b, 0), less(c, 0)) (removed) -Test LowCardinality and Nullable data type: -tab2 diff --git a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql b/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql deleted file mode 100644 index c730aa7b4a7..00000000000 --- a/tests/queries/0_stateless/02864_statistics_count_min_sketch.sql +++ /dev/null @@ -1,70 +0,0 @@ --- Tags: no-fasttest - -DROP TABLE IF EXISTS tab SYNC; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET allow_suspicious_low_cardinality_types=1; -SET mutations_sync = 2; - -CREATE TABLE tab -( - a String, - b UInt64, - c Int64, - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE tab; - -INSERT INTO tab select toString(number % 10000), number % 1000, -(number % 100), generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'Test statistics count_min:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min; -ALTER TABLE tab ADD STATISTICS c TYPE count_min; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c = 0/*100*/ and b = 0/*10*/ and a = '0'/*1*/) xx -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - - -SELECT 'Test statistics multi-types:'; - -ALTER TABLE tab ADD STATISTICS a TYPE count_min; -ALTER TABLE tab ADD STATISTICS b TYPE count_min, uniq, tdigest; -ALTER TABLE tab ADD STATISTICS c TYPE count_min, uniq, tdigest; -ALTER TABLE tab MATERIALIZE STATISTICS a, b, c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < -90/*900*/ and b > 900/*990*/ and a = '0'/*1*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT 
replaceRegexpAll(explain, '__table1.|_UInt8|_Int8|_UInt16|_String', '') -FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 0/*9900*/ and b = 0/*10*/ and a = '10000'/*0*/) -WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -ALTER TABLE tab DROP STATISTICS a, b, c; - -DROP TABLE IF EXISTS tab SYNC; - - -SELECT 'Test LowCardinality and Nullable data type:'; -DROP TABLE IF EXISTS tab2 SYNC; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE tab2 -( - a LowCardinality(Int64) STATISTICS(count_min), - b Nullable(Int64) STATISTICS(count_min), - c LowCardinality(Nullable(Int64)) STATISTICS(count_min), - pk String, -) Engine = MergeTree() ORDER BY pk; - -select name from system.tables where name = 'tab2' and database = currentDatabase(); - -DROP TABLE IF EXISTS tab2 SYNC; diff --git a/tests/queries/0_stateless/02864_statistics_ddl.reference b/tests/queries/0_stateless/02864_statistics_ddl.reference index a7ff5caa0b0..0e453b0ee8a 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.reference +++ b/tests/queries/0_stateless/02864_statistics_ddl.reference @@ -1,31 +1,6 @@ -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -10 -0 -After drop statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(b, 10), less(a, 10)) (removed) -10 -CREATE TABLE default.tab\n(\n `a` Float64,\n `b` Int64,\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After add statistic -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After materialize statistic - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -20 -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(b, 10)) (removed) -20 -CREATE TABLE default.tab\n(\n `a` Float64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After rename - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, 10)) (removed) -20 +CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY 
tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64 STATISTICS(tdigest, uniq),\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32 STATISTICS(tdigest, uniq),\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 +CREATE TABLE default.tab\n(\n `f64` Float64,\n `f64_tdigest` Float64 STATISTICS(tdigest),\n `f32` Float32,\n `s` String,\n `a` Array(Float64)\n)\nENGINE = MergeTree\nORDER BY tuple()\nSETTINGS index_granularity = 8192 diff --git a/tests/queries/0_stateless/02864_statistics_ddl.sql b/tests/queries/0_stateless/02864_statistics_ddl.sql index fe612efe2ac..32b56a842b7 100644 --- a/tests/queries/0_stateless/02864_statistics_ddl.sql +++ b/tests/queries/0_stateless/02864_statistics_ddl.sql @@ -1,59 +1,195 @@ --- Tests that various DDL statements create/drop/materialize statistics +-- Tags: no-fasttest +-- no-fasttest: 'count_min' sketches need a 3rd party library + +-- Tests that DDL statements which create / drop / materialize statistics + +SET mutations_sync = 1; DROP TABLE IF EXISTS tab; +-- Error case: Can't create statistics when allow_experimental_statistics = 0 +CREATE TABLE tab (col Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; + +-- Error case: Unknown statistics types are rejected +CREATE TABLE tab (col Float64 STATISTICS(no_statistics_type)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +-- Error case: The same statistics type can't exist more than once on a column +CREATE TABLE tab (col Float64 STATISTICS(tdigest, tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SET allow_suspicious_low_cardinality_types = 1; + +-- Statistics can only be created on columns of specific data types (depending on the statistics kind), (*) + +-- tdigest requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col 
String STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- uniq requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- count_min requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(count_min)) Engine = MergeTree() ORDER BY 
tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col Array(Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(count_min)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- CREATE TABLE was easy, ALTER is more fun CREATE TABLE tab ( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f32 Float32, + s String, + a Array(Float64) +) +Engine = MergeTree() +ORDER BY tuple(); +-- Error case: Unknown statistics types are rejected +-- (relevant for ADD and MODIFY) +ALTER TABLE tab ADD STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +-- for some reason, ALTER TABLE tab MODIFY STATISTICS IF EXISTS is not supported + +-- Error case: The same statistics type can't exist more than once on a column +-- (relevant for ADD and MODIFY) +-- Create the same statistics object twice +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab 
MODIFY STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +-- Create a statistics object which exists already +ALTER TABLE tab ADD STATISTICS f64_tdigest TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64_tdigest TYPE tdigest; -- no-op +ALTER TABLE tab MODIFY STATISTICS f64_tdigest TYPE tdigest; -- no-op + +-- Error case: Column does not exist +-- (relevant for ADD, MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab ADD STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab CLEAR STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS no_such_column; -- { serverError ILLEGAL_STATISTICS } + +-- Error case: Column exists but has no statistics +-- (relevant for MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab MODIFY STATISTICS s TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab CLEAR STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS s; -- { serverError ILLEGAL_STATISTICS } + +-- We don't check systematically that statistics can only be created via ALTER ADD STATISTICS on columns of specific data types (the +-- internal type validation code is tested already above, (*)). Only do a rudimentary check for each statistics type with a data type that +-- works and one that doesn't work. 
+-- tdigest +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +-- uniq +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +-- count_min +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE count_min; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE count_min; -- { serverError ILLEGAL_STATISTICS } + +-- Any data type changes on columns with statistics are disallowed, for simplicity even if the new data type is compatible with all existing +-- statistics objects (e.g. tdigest can be created on Float64 and UInt64) +ALTER TABLE tab MODIFY COLUMN f64_tdigest UInt64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- Finally, do a full-circle test of a good case. Print table definition after each step. +-- Intentionally specifying _two_ columns and _two_ statistics types to have that also tested. +SHOW CREATE TABLE tab; +ALTER TABLE tab ADD STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab MODIFY STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab CLEAR STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab MATERIALIZE STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab DROP STATISTICS f64, f32; SHOW CREATE TABLE tab; -INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; -SELECT count(*) FROM tab WHERE b < NULL and a < '10'; - -ALTER TABLE tab DROP STATISTICS a, b; - -SELECT 'After drop statistic'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -SHOW CREATE TABLE tab; - -ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; - -SELECT 'After add statistic'; - -SHOW CREATE TABLE tab; - -ALTER TABLE tab MATERIALIZE STATISTICS a, b; -INSERT INTO tab select number, -number, generateUUIDv4() FROM system.numbers LIMIT 10000; - -SELECT 'After materialize statistic'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -OPTIMIZE TABLE tab FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT 
count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE b < 10 and a < 10; - -ALTER TABLE tab RENAME COLUMN b TO c; -SHOW CREATE TABLE tab; - -SELECT 'After rename'; -SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT count(*) FROM tab WHERE c < 10 and a < 10; - -DROP TABLE IF EXISTS tab; +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference new file mode 100644 index 00000000000..eb5e685597c --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.reference @@ -0,0 +1,12 @@ +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After truncate, insert, and materialize + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql new file mode 100644 index 00000000000..d469a4c2036 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_delayed_materialization_in_merge.sql @@ -0,0 +1,36 @@ +-- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) + +DROP TABLE IF EXISTS tab; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET enable_analyzer = 1; + +SET materialize_statistics_on_insert = 0; + +CREATE TABLE tab +( + a Int64 STATISTICS(tdigest), + b Int16 STATISTICS(tdigest), +) ENGINE = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. 
+ +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +TRUNCATE TABLE tab; +SET mutations_sync = 2; +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +SELECT 'After truncate, insert, and materialize'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_exception.sql b/tests/queries/0_stateless/02864_statistics_exception.sql deleted file mode 100644 index 289ffee6600..00000000000 --- a/tests/queries/0_stateless/02864_statistics_exception.sql +++ /dev/null @@ -1,55 +0,0 @@ --- Tests creating/dropping/materializing statistics produces the right exceptions. - -DROP TABLE IF EXISTS tab; - --- Can't create statistics when allow_experimental_statistics = 0 -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - -SET allow_experimental_statistics = 1; - --- The same type of statistics can't exist more than once on a column -CREATE TABLE tab -( - a Float64 STATISTICS(tdigest, tdigest) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- Unknown statistics types are rejected -CREATE TABLE tab -( - a Float64 STATISTICS(no_statistics_type) -) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } - --- tDigest statistics can only be created on numeric columns -CREATE TABLE tab -( - a String STATISTICS(tdigest), -) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } - -CREATE TABLE tab -( - a Float64, - b String -) Engine = MergeTree() ORDER BY tuple(); - -ALTER TABLE tab ADD STATISTICS a TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -ALTER TABLE tab ADD STATISTICS IF NOT EXISTS a TYPE tdigest; -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; --- Statistics can be created only on integer columns -ALTER TABLE tab ADD STATISTICS b TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS b; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab DROP STATISTICS a; -ALTER TABLE tab DROP STATISTICS IF EXISTS a; -ALTER TABLE tab CLEAR STATISTICS a; -- { serverError ILLEGAL_STATISTICS } -ALTER TABLE tab CLEAR STATISTICS IF EXISTS a; -ALTER TABLE tab MATERIALIZE STATISTICS b; -- { serverError ILLEGAL_STATISTICS } - -ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -ALTER TABLE tab MODIFY COLUMN a Float64 TTL toDateTime(b) + INTERVAL 1 MONTH; -ALTER TABLE tab MODIFY COLUMN a Int64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } - -DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference 
b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference deleted file mode 100644 index 5e969cf41cb..00000000000 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.reference +++ /dev/null @@ -1,10 +0,0 @@ -10 -10 -10 -statistics not used Condition less(b, 10_UInt8) moved to PREWHERE -statistics not used Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after merge Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after merge Condition less(b, 10_UInt8) moved to PREWHERE -statistics used after materialize Condition less(a, 10_UInt8) moved to PREWHERE -statistics used after materialize Condition less(b, 10_UInt8) moved to PREWHERE -2 0 diff --git a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql b/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql deleted file mode 100644 index 6606cff263f..00000000000 --- a/tests/queries/0_stateless/02864_statistics_materialize_in_merge.sql +++ /dev/null @@ -1,52 +0,0 @@ --- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). - -DROP TABLE IF EXISTS tab; - -SET enable_analyzer = 1; -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; - -SET materialize_statistics_on_insert = 0; - -CREATE TABLE tab -( - a Int64 STATISTICS(tdigest), - b Int16 STATISTICS(tdigest), -) ENGINE = MergeTree() ORDER BY tuple() -SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. - -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics not used'; - -OPTIMIZE TABLE tab FINAL; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after merge'; - -TRUNCATE TABLE tab; -SET mutations_sync = 2; - -INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; -ALTER TABLE tab MATERIALIZE STATISTICS a, b; - -SELECT count(*) FROM tab WHERE b < 10 and a < 10 SETTINGS log_comment = 'statistics used after materialize'; - -DROP TABLE tab; - -SYSTEM FLUSH LOGS; - -SELECT log_comment, message FROM system.text_log JOIN -( - SELECT Settings['log_comment'] AS log_comment, query_id FROM system.query_log - WHERE current_database = currentDatabase() - AND query LIKE 'SELECT count(*) FROM tab%' - AND type = 'QueryFinish' -) AS query_log USING (query_id) -WHERE message LIKE '%moved to PREWHERE%' -ORDER BY event_time_microseconds; - -SELECT count(), sum(ProfileEvents['MergeTreeDataWriterStatisticsCalculationMicroseconds']) -FROM system.query_log -WHERE current_database = currentDatabase() - AND query LIKE 'INSERT INTO tab SELECT%' - AND type = 'QueryFinish'; diff --git a/tests/queries/0_stateless/02864_statistics_predicates.reference b/tests/queries/0_stateless/02864_statistics_predicates.reference new file mode 100644 index 00000000000..ffbd7269e05 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_predicates.reference @@ -0,0 +1,98 @@ +u64 and = +10 +10 +10 +10 +0 +0 +0 +0 +10 +10 +10 +10 +u64 and < +70 +70 +70 +70 +80 +80 +80 +80 +70 +70 +70 +70 +f64 and = +10 +10 +10 +10 +0 +0 +0 +0 +10 +10 +10 +10 +0 +0 +0 +0 +f64 and < +70 +70 +70 +70 +80 +80 +80 +80 +70 +70 +70 +70 +80 +80 +80 +80 +dt and = +0 +0 +0 +0 +10 +10 +10 +10 +dt and < +10000 +10000 +10000 +10000 +70 +70 +70 +70 +b and = +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +5000 +0 +0 +0 +0 
+s and = +10 +10 diff --git a/tests/queries/0_stateless/02864_statistics_predicates.sql b/tests/queries/0_stateless/02864_statistics_predicates.sql new file mode 100644 index 00000000000..779116cf19a --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_predicates.sql @@ -0,0 +1,214 @@ +-- Tags: no-fasttest +-- no-fasttest: 'count_min' sketches need a 3rd party library + +-- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types. + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + u64 UInt64, + u64_tdigest UInt64 STATISTICS(tdigest), + u64_count_min UInt64 STATISTICS(count_min), + u64_uniq UInt64 STATISTICS(uniq), + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f64_count_min Float64 STATISTICS(count_min), + f64_uniq Float64 STATISTICS(uniq), + dt DateTime, + dt_tdigest DateTime STATISTICS(tdigest), + dt_count_min DateTime STATISTICS(count_min), + dt_uniq DateTime STATISTICS(uniq), + b Bool, + b_tdigest Bool STATISTICS(tdigest), + b_count_min Bool STATISTICS(count_min), + b_uniq Bool STATISTICS(uniq), + s String, + -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest + s_count_min String STATISTICS(count_min) + -- s_uniq String STATISTICS(uniq), -- not supported by uniq +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab +-- SELECT number % 10000, number % 1000, -(number % 100) FROM system.numbers LIMIT 10000; +SELECT number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 2, + number % 2, + number % 2, + number % 2, + toString(number % 1000), + toString(number % 1000) +FROM system.numbers LIMIT 10000; + +-- u64 ---------------------------------------------------- + +SELECT 'u64 and ='; + +SELECT count(*) FROM tab WHERE u64 = 7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7; +SELECT count(*) FROM tab WHERE u64_count_min = 7; +SELECT count(*) FROM tab WHERE u64_uniq = 7; + +SELECT count(*) FROM tab WHERE u64 = 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE u64_count_min = 7.7; +SELECT count(*) FROM tab WHERE u64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE u64 = '7'; +SELECT count(*) FROM tab WHERE u64_tdigest = '7'; +SELECT count(*) FROM tab WHERE u64_count_min = '7'; +SELECT count(*) FROM tab WHERE u64_uniq = '7'; + +SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_count_min = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } + +SELECT 'u64 and <'; + +SELECT count(*) FROM tab WHERE u64 < 7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7; +SELECT count(*) FROM tab WHERE u64_count_min < 7; +SELECT count(*) FROM tab WHERE u64_uniq < 7; + +SELECT count(*) FROM tab WHERE u64 < 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE u64_count_min < 7.7; +SELECT count(*) FROM tab WHERE u64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE u64 < '7'; +SELECT count(*) FROM tab WHERE u64_tdigest < '7'; +SELECT count(*) FROM tab WHERE u64_count_min < '7'; +SELECT count(*) FROM tab WHERE u64_uniq < '7'; + +SELECT count(*) FROM tab WHERE u64 
< '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_count_min < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } + +-- f64 ---------------------------------------------------- + +SELECT 'f64 and ='; + +SELECT count(*) FROM tab WHERE f64 = 7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7; +SELECT count(*) FROM tab WHERE f64_count_min = 7; +SELECT count(*) FROM tab WHERE f64_uniq = 7; + +SELECT count(*) FROM tab WHERE f64 = 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE f64_count_min = 7.7; +SELECT count(*) FROM tab WHERE f64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE f64 = '7'; +SELECT count(*) FROM tab WHERE f64_tdigest = '7'; +SELECT count(*) FROM tab WHERE f64_count_min = '7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7'; + +SELECT count(*) FROM tab WHERE f64 = '7.7'; +SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; +SELECT count(*) FROM tab WHERE f64_count_min = '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; + +SELECT 'f64 and <'; + +SELECT count(*) FROM tab WHERE f64 < 7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7; +SELECT count(*) FROM tab WHERE f64_count_min < 7; +SELECT count(*) FROM tab WHERE f64_uniq < 7; + +SELECT count(*) FROM tab WHERE f64 < 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE f64_count_min < 7.7; +SELECT count(*) FROM tab WHERE f64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE f64 < '7'; +SELECT count(*) FROM tab WHERE f64_tdigest < '7'; +SELECT count(*) FROM tab WHERE f64_count_min < '7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7'; + +SELECT count(*) FROM tab WHERE f64 < '7.7'; +SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; +SELECT count(*) FROM tab WHERE f64_count_min < '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; + +-- dt ---------------------------------------------------- + +SELECT 'dt and ='; + +SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_count_min = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt = 7; +SELECT count(*) FROM tab WHERE dt_tdigest = 7; +SELECT count(*) FROM tab WHERE dt_count_min = 7; +SELECT count(*) FROM tab WHERE dt_uniq = 7; + +SELECT 'dt and <'; + +SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_count_min < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt < 7; +SELECT count(*) FROM tab WHERE dt_tdigest < 7; +SELECT count(*) FROM tab WHERE dt_count_min < 7; +SELECT count(*) FROM tab WHERE dt_uniq < 7; + +-- b ---------------------------------------------------- + +SELECT 'b and ='; + +SELECT count(*) FROM tab WHERE b = true; +SELECT count(*) FROM tab WHERE b_tdigest = true; +SELECT count(*) FROM tab WHERE b_count_min = true; +SELECT count(*) FROM tab WHERE b_uniq = true; + +SELECT count(*) FROM tab WHERE b = 'true'; +SELECT count(*) FROM tab WHERE b_tdigest = 'true'; +SELECT count(*) FROM tab WHERE b_count_min = 'true'; +SELECT count(*) FROM tab WHERE b_uniq = 'true'; + +SELECT count(*) FROM tab WHERE b = 1; +SELECT count(*) FROM tab WHERE b_tdigest = 1; 
+SELECT count(*) FROM tab WHERE b_count_min = 1; +SELECT count(*) FROM tab WHERE b_uniq = 1; + +SELECT count(*) FROM tab WHERE b = 1.1; +SELECT count(*) FROM tab WHERE b_tdigest = 1.1; +SELECT count(*) FROM tab WHERE b_count_min = 1.1; +SELECT count(*) FROM tab WHERE b_uniq = 1.1; + +-- s ---------------------------------------------------- + +SELECT 's and ='; + +SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE } +-- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported +SELECT count(*) FROM tab WHERE s_count_min = 7; -- { serverError NO_COMMON_TYPE } +-- SELECT count(*) FROM tab WHERE s_uniq = 7; -- not supported + +SELECT count(*) FROM tab WHERE s = '7'; +-- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported +SELECT count(*) FROM tab WHERE s_count_min = '7'; +-- SELECT count(*) FROM tab WHERE s_uniq = '7'; -- not supported + +DROP TABLE tab; diff --git a/tests/queries/0_stateless/02864_statistics_uniq.reference b/tests/queries/0_stateless/02864_statistics_uniq.reference deleted file mode 100644 index 77786dbdd8c..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.reference +++ /dev/null @@ -1,35 +0,0 @@ -CREATE TABLE default.t1\n(\n `a` Float64 STATISTICS(tdigest),\n `b` Int64 STATISTICS(tdigest),\n `c` Int64 STATISTICS(tdigest, uniq),\n `pk` String\n)\nENGINE = MergeTree\nORDER BY pk\nSETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192 -After insert - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After merge - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(equals(c, 11), less(a, 10), less(b, 10)) (removed) -After modify TDigest - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(c, -1), less(a, 10), less(b, 10)) (removed) -After drop - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 11), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), equals(c, 0), less(b, 10)) (removed) - Prewhere info - Prewhere filter - Prewhere filter column: and(less(a, 10), less(c, -1), less(b, 10)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_uniq.sql b/tests/queries/0_stateless/02864_statistics_uniq.sql deleted file mode 100644 index 0f5f353c045..00000000000 --- a/tests/queries/0_stateless/02864_statistics_uniq.sql +++ /dev/null @@ -1,73 +0,0 @@ -DROP TABLE IF EXISTS t1; - -SET allow_experimental_statistics = 1; -SET allow_statistics_optimize = 1; -SET mutations_sync = 1; - -CREATE TABLE t1 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Int64 STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; - -SHOW CREATE TABLE t1; - -INSERT INTO t1 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; -INSERT INTO t1 select 0, 0, 11, generateUUIDv4(); - -SELECT 'After insert'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM 
t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -OPTIMIZE TABLE t1 FINAL; - -SELECT 'After merge'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -SELECT 'After modify TDigest'; -ALTER TABLE t1 MODIFY STATISTICS c TYPE TDigest; -ALTER TABLE t1 MATERIALIZE STATISTICS c; - -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - - -ALTER TABLE t1 DROP STATISTICS c; - -SELECT 'After drop'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 11 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c = 0 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; -SELECT replaceRegexpAll(explain, '__table1.|_UInt8|_Int8', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM t1 WHERE b < 10 and c < -1 and a < 10) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; - -DROP TABLE IF EXISTS t1; -DROP TABLE IF EXISTS t2; -SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t2 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c LowCardinality(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t2 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t2; -DROP TABLE IF EXISTS t3; - -CREATE TABLE t3 -( - a Float64 STATISTICS(tdigest), - b Int64 STATISTICS(tdigest), - c Nullable(Int64) STATISTICS(tdigest, uniq), - pk String, -) Engine = MergeTree() ORDER BY pk -SETTINGS min_bytes_for_wide_part = 0; -INSERT INTO t3 select number, -number, number/1000, generateUUIDv4() FROM system.numbers LIMIT 10000; - -DROP TABLE IF EXISTS t3; - diff --git a/tests/queries/0_stateless/02864_statistics_usage.reference b/tests/queries/0_stateless/02864_statistics_usage.reference new file mode 100644 index 00000000000..a9f669b88c1 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_usage.reference @@ -0,0 +1,20 @@ +After insert + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After drop statistic + 
Prewhere info + Prewhere filter + Prewhere filter column: and(less(b, 10_UInt8), less(a, 10_UInt8)) (removed) +After add and materialize statistic + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After merge + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(b, 10_UInt8)) (removed) +After rename + Prewhere info + Prewhere filter + Prewhere filter column: and(less(a, 10_UInt8), less(c, 10_UInt8)) (removed) diff --git a/tests/queries/0_stateless/02864_statistics_usage.sql b/tests/queries/0_stateless/02864_statistics_usage.sql new file mode 100644 index 00000000000..4956bd27e87 --- /dev/null +++ b/tests/queries/0_stateless/02864_statistics_usage.sql @@ -0,0 +1,42 @@ +-- Test that the optimizer picks up column statistics +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- Checks by the predicate evaluation order in EXPLAIN. This is quite fragile, a better approach would be helpful (maybe 'send_logs_level'?) + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest) +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab DROP STATISTICS a, b; +SELECT 'After drop statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After add and materialize statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab RENAME COLUMN b TO c; +SELECT 'After rename'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then c (statistics used) + +DROP TABLE IF EXISTS tab; diff --git a/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh b/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh index 4bc29ce4233..be0ef4e2648 100755 --- a/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh +++ b/tests/queries/0_stateless/02868_no_merge_across_partitions_final_with_lonely.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm -q """ +${CLICKHOUSE_CLIENT} -m -q """ DROP TABLE IF EXISTS with_lonely; CREATE TABLE with_lonely @@ -23,7 +23,7 @@ ORDER BY (id); """ create_optimize_partition() { - ${CLICKHOUSE_CLIENT} -nm -q """ + ${CLICKHOUSE_CLIENT} -m -q """ INSERT INTO with_lonely SELECT number, '$1', number*10, 0 FROM numbers(10); INSERT INTO with_lonely SELECT number+500000, '$1', number*10, 1 FROM numbers(10); """ @@ -39,7 +39,7 @@ create_optimize_partition "2022-10-29" create_optimize_partition "2022-10-30" create_optimize_partition "2022-10-31" -${CLICKHOUSE_CLIENT} -nm -q """ +${CLICKHOUSE_CLIENT} -m -q """ SYSTEM STOP MERGES with_lonely; INSERT INTO with_lonely SELECT number, '2022-11-01', number*10, 0 FROM numbers(10); diff --git a/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh b/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh index cc4ce9b122e..418e439e44b 100755 --- a/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh +++ b/tests/queries/0_stateless/02871_clickhouse_client_restart_pager.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # head by default print 10 rows, but it is not enough to query 11 rows, since # we need to overflow the default pipe size, hence just 1 million of rows (it # should be around 6 MiB in text representation, should be definitelly enough). -$CLICKHOUSE_CLIENT --ignore-error -nm --pager head -q " +$CLICKHOUSE_CLIENT --ignore-error -m --pager head -q " select * from numbers(1e6); -- { clientError CANNOT_WRITE_TO_FILE_DESCRIPTOR } select * from numbers(1e6); -- { clientError CANNOT_WRITE_TO_FILE_DESCRIPTOR } " diff --git a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh index 3f2b732e71b..3a7d861262e 100755 --- a/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh +++ b/tests/queries/0_stateless/02875_clickhouse_local_multiquery.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_CLIENT -q "select 1; select 2;" $CLICKHOUSE_LOCAL -q "select 1; select 2;" # -n is a no-op -$CLICKHOUSE_CLIENT -n -q "select 1; select 2;" -$CLICKHOUSE_LOCAL -n -q "select 1; select 2;" +$CLICKHOUSE_CLIENT -q "select 1; select 2;" +$CLICKHOUSE_LOCAL -q "select 1; select 2;" exit 0 diff --git a/tests/queries/0_stateless/02875_merge_engine_set_index.sh b/tests/queries/0_stateless/02875_merge_engine_set_index.sh index 355d83167a6..f40696c31a9 100755 --- a/tests/queries/0_stateless/02875_merge_engine_set_index.sh +++ b/tests/queries/0_stateless/02875_merge_engine_set_index.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " CREATE TABLE t1 ( a UInt32, @@ -57,7 +57,7 @@ ORDER BY b DESC FORMAT Null;" -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " SYSTEM FLUSH LOGS; SELECT ProfileEvents['SelectedMarks'] diff --git a/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh b/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh index 8bdaa47c111..dd08724456b 100755 --- a/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh +++ b/tests/queries/0_stateless/02878_use_structure_from_insertion_table_with_explicit_insert_columns.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_LOCAL -q "select 42 as x format Native" > $CLICKHOUSE_TEST_UNIQUE_NAME.native -$CLICKHOUSE_LOCAL -n -q " +$CLICKHOUSE_LOCAL -q " create table test (x UInt64, y UInt64) engine=Memory; insert into test (x) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native'); insert into test (y) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native'); diff --git a/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh b/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh index 315bbcd544f..c7270b65e19 100755 --- a/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh +++ b/tests/queries/0_stateless/02879_use_structure_from_insertion_table_with_defaults.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh $CLICKHOUSE_LOCAL -q "select 1 as x format Native" > $CLICKHOUSE_TEST_UNIQUE_NAME.native -$CLICKHOUSE_LOCAL -n -q " +$CLICKHOUSE_LOCAL -q " create table test (x UInt64, y UInt64 default 42) engine=Memory; insert into test select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.native'); select * from test; diff --git a/tests/queries/0_stateless/02883_named_collections_override.sh b/tests/queries/0_stateless/02883_named_collections_override.sh index a08c795127d..915ce280226 100755 --- a/tests/queries/0_stateless/02883_named_collections_override.sh +++ b/tests/queries/0_stateless/02883_named_collections_override.sh @@ -8,7 +8,7 @@ u1="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection1" u2="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection2" u3="${CLICKHOUSE_TEST_UNIQUE_NAME}_collection3" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP NAMED COLLECTION IF EXISTS $u1; DROP NAMED COLLECTION IF EXISTS $u2; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh index 7f583087336..791515c82d6 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_1.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_1; CREATE TABLE t_async_insert_native_1 (id UInt64, s String) ENGINE = MergeTree ORDER BY id; " @@ -22,7 +22,7 @@ echo '{"id": 1, "s": "aaa"}' \ | $CLICKHOUSE_CLIENT $async_insert_options -q 'INSERT INTO t_async_insert_native_1 FORMAT JSONEachRow {"id": 2, "s": "bbb"}' 2>&1 \ | grep -o "NOT_IMPLEMENTED" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " SELECT sum(length(entries.bytes)) FROM system.asynchronous_inserts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_async_insert_native_1'; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh index b9b1854eaef..a8a9209ee68 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_2.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_2; CREATE TABLE t_async_insert_native_2 (id UInt64, s String) ENGINE = MergeTree ORDER BY id; " @@ -18,7 +18,7 @@ echo "(3, 'ccc') (4, 'ddd') (5, 'eee')" | $CLICKHOUSE_CLIENT $async_insert_optio wait -$CLICKHOUSE_CLIENT -n -q " 
+$CLICKHOUSE_CLIENT -q " SELECT * FROM t_async_insert_native_2 ORDER BY id; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh index c9d399607d0..229f13eb821 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_3.sh @@ -7,7 +7,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) set -e -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_3; CREATE TABLE t_async_insert_native_3 (id UInt64, s String) ENGINE = MergeTree ORDER BY id; " @@ -21,7 +21,7 @@ $CLICKHOUSE_CLIENT $async_insert_options -q "INSERT INTO t_async_insert_native_3 wait -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " SELECT format, length(entries.bytes) FROM system.asynchronous_inserts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_async_insert_native_3' ORDER BY format; diff --git a/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh b/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh index 9118c11315c..e84c1ca8899 100755 --- a/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh +++ b/tests/queries/0_stateless/02884_async_insert_native_protocol_4.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_async_insert_native_4; CREATE TABLE t_async_insert_native_4 (id UInt64) ENGINE = MergeTree ORDER BY id; " @@ -20,7 +20,7 @@ echo "(2) (3) (4) (5)" | $CLICKHOUSE_CLIENT_WITH_LOG $async_insert_options --asy -q 'INSERT INTO t_async_insert_native_4 FORMAT Values' 2>&1 \ | grep -c "too much data" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " SELECT * FROM t_async_insert_native_4 ORDER BY id; SYSTEM FLUSH LOGS; diff --git a/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh b/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh index 2917ec86957..065658d4d56 100755 --- a/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh +++ b/tests/queries/0_stateless/02885_ephemeral_columns_from_file.sh @@ -9,7 +9,7 @@ $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', a $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', auto, 'x UInt64 Alias y, y UInt64')" 2>&1 | grep -c "BAD_ARGUMENTS" $CLICKHOUSE_LOCAL -q "select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl', auto, 'x UInt64 Materialized 42, y UInt64')" 2>&1 | grep -c "BAD_ARGUMENTS" -$CLICKHOUSE_LOCAL -n -q " +$CLICKHOUSE_LOCAL -q " create table test (x UInt64 Ephemeral, y UInt64 default x + 1) engine=Memory; insert into test (x, y) select * from file('$CLICKHOUSE_TEST_UNIQUE_NAME.jsonl'); select * from test; diff --git a/tests/queries/0_stateless/02895_npy_output_format.sh b/tests/queries/0_stateless/02895_npy_output_format.sh index a364e447062..74000bc298f 100755 --- a/tests/queries/0_stateless/02895_npy_output_format.sh +++ b/tests/queries/0_stateless/02895_npy_output_format.sh @@ -9,7 +9,7 @@ mkdir -p ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ rm -rf ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME:?}/* chmod 777 ${USER_FILES_PATH}/${CLICKHOUSE_TEST_UNIQUE_NAME}/ -${CLICKHOUSE_CLIENT} -n -q --ignore-error " +${CLICKHOUSE_CLIENT} -q --ignore-error " DROP DATABASE IF EXISTS npy_output_02895; CREATE DATABASE IF NOT EXISTS 
npy_output_02895; diff --git a/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh b/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh index d6775927f35..b4656c9e321 100755 --- a/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh +++ b/tests/queries/0_stateless/02895_peak_memory_usage_http_headers_regression.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS data; DROP TABLE IF EXISTS data2; DROP VIEW IF EXISTS mv1; diff --git a/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh b/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh index 64f5dd1a987..ef631d9ed1b 100755 --- a/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh +++ b/tests/queries/0_stateless/02903_empty_order_by_throws_error.sh @@ -5,13 +5,13 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # setting disabled and no order by or primary key; expect error -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() SETTINGS index_granularity = 8192; " 2>&1 \ | grep -F -q "You must provide an ORDER BY or PRIMARY KEY expression in the table definition." && echo 'OK' || echo 'FAIL' # setting disabled and primary key in table definition -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() PRIMARY KEY a SETTINGS index_granularity = 8192; SHOW CREATE TABLE test_empty_order_by; diff --git a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh index 5065da371a8..b77e5b0b402 100755 --- a/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh +++ b/tests/queries/0_stateless/02903_rmt_retriable_merge_exception.sh @@ -15,7 +15,7 @@ if [[ $($CLICKHOUSE_CLIENT -q "select count()>0 from system.clusters where clust cluster=test_cluster_database_replicated fi -$CLICKHOUSE_CLIENT -nm --distributed_ddl_output_mode=none -q " +$CLICKHOUSE_CLIENT -m --distributed_ddl_output_mode=none -q " drop table if exists rmt1; drop table if exists rmt2; @@ -46,7 +46,7 @@ part_name='%' # wait while there be at least one 'No active replica has part all_0_1_1 or covering part' in logs for _ in {0..50}; do - no_active_repilica_messages=$($CLICKHOUSE_CLIENT -nm -q " + no_active_repilica_messages=$($CLICKHOUSE_CLIENT -m -q " system flush logs; select count() @@ -65,7 +65,7 @@ for _ in {0..50}; do sleep 1 done -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " system start pulling replication log rmt2; system flush logs; diff --git a/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh b/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh index 7ac9b488be5..5f9dc6ea077 100755 --- a/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh +++ b/tests/queries/0_stateless/02904_empty_order_by_with_setting_enabled.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh # setting enabled and no order by or primary key -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8) ENGINE = MergeTree() SETTINGS index_granularity = 8192; @@ -13,7 +13,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" " 2>&1 \ | grep -F -q "ORDER BY tuple()" && echo 'OK' || echo 'FAIL' # setting enabled and per-column primary key -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8 PRIMARY KEY, b String PRIMARY KEY) ENGINE = MergeTree() SETTINGS index_granularity = 8192; @@ -21,7 +21,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" " 2>&1 \ | grep -F -q "ORDER BY (a, b)" && echo 'OK' || echo 'FAIL' # setting enabled and primary key in table definition (not per-column or order by) -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8, b String) ENGINE = MergeTree() PRIMARY KEY (a) SETTINGS index_granularity = 8192; @@ -29,7 +29,7 @@ ${CLICKHOUSE_CLIENT} -n --query=" " 2>&1 \ | grep -F -q "ORDER BY a" && echo 'OK' || echo 'FAIL' # setting enabled and order by in table definition (no primary key) -${CLICKHOUSE_CLIENT} -n --query=" +${CLICKHOUSE_CLIENT} --query=" SET create_table_empty_primary_key_by_default = true; DROP TABLE IF EXISTS test_empty_order_by; CREATE TABLE test_empty_order_by(a UInt8, b String) ENGINE = MergeTree() ORDER BY (a, b) SETTINGS index_granularity = 8192; diff --git a/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh b/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh index 30ec50fa20f..e37f1e51c74 100755 --- a/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh +++ b/tests/queries/0_stateless/02907_backup_mv_with_no_inner_table.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists src; create table src (a Int32) engine = MergeTree() order by tuple(); @@ -15,14 +15,14 @@ create materialized view mv (a Int32) engine = MergeTree() order by tuple() as s uuid=$(${CLICKHOUSE_CLIENT} --query "select uuid from system.tables where table='mv' and database == currentDatabase()") inner_table=".inner_id.${uuid}" -${CLICKHOUSE_CLIENT} -nm --query "drop table \`$inner_table\` sync" +${CLICKHOUSE_CLIENT} -m --query "drop table \`$inner_table\` sync" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " set send_logs_level = 'error'; backup table ${CLICKHOUSE_DATABASE}.\`mv\` to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table mv; restore table ${CLICKHOUSE_DATABASE}.\`mv\` from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "RESTORED" diff --git a/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh b/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh index d59ebe400ee..f950954941f 100755 --- a/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh +++ b/tests/queries/0_stateless/02907_backup_mv_with_no_source_table.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists src; create table src (a Int32) engine = MergeTree() order by tuple(); @@ -15,18 +15,18 @@ drop table if exists mv; create materialized view mv to dst (a Int32) as select * from src; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table src; backup database ${CLICKHOUSE_DATABASE} on cluster test_shard_localhost to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table mv; set allow_deprecated_database_ordinary=1; restore table ${CLICKHOUSE_DATABASE}.mv on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); " | grep -o "RESTORED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists src; create table src (a Int32) engine = MergeTree() order by tuple(); @@ -37,13 +37,13 @@ drop table if exists mv; create materialized view mv to dst (a Int32) as select * from src; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table src; drop table dst; backup database ${CLICKHOUSE_DATABASE} on cluster test_shard_localhost to Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2'); " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table mv; set allow_deprecated_database_ordinary=1; restore table ${CLICKHOUSE_DATABASE}.mv on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2'); diff --git a/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh b/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh index 8ed36a7edd7..dc5793d1638 100755 --- a/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh +++ b/tests/queries/0_stateless/02907_backup_restore_default_nullable.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test; set data_type_default_nullable = 0; create table test (test String) ENGINE = MergeTree() ORDER BY tuple(); @@ -13,7 +13,7 @@ backup table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost to Disk ${CLICKHOUSE_CLIENT} --query "show create table test" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table test sync; set data_type_default_nullable = 1; restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); diff --git a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh index 742d24a97eb..eae307add10 100755 --- a/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh +++ b/tests/queries/0_stateless/02907_backup_restore_flatten_nested.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test; set flatten_nested = 0; create table test (test Array(Tuple(foo String, bar Float64))) ENGINE = MergeTree() ORDER BY tuple(); @@ -13,7 +13,7 @@ backup table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost to Disk ${CLICKHOUSE_CLIENT} --query "show create table test" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test2; set flatten_nested = 0; create table test2 (test Nested(foo String, bar Float64)) ENGINE = MergeTree() ORDER BY tuple(); @@ -22,7 +22,7 @@ backup table ${CLICKHOUSE_DATABASE}.test2 on cluster test_shard_localhost to Dis ${CLICKHOUSE_CLIENT} --query "show create table test2" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table test sync; set flatten_nested = 1; restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}'); @@ -30,7 +30,7 @@ restore table ${CLICKHOUSE_DATABASE}.test on cluster test_shard_localhost from D ${CLICKHOUSE_CLIENT} --query "show create table test" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table test2 sync; set flatten_nested = 1; restore table ${CLICKHOUSE_DATABASE}.test2 on cluster test_shard_localhost from Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}2'); diff --git a/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh b/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh index 57182050534..2cad15c6fcb 100755 --- a/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh +++ b/tests/queries/0_stateless/02907_clickhouse_dictionary_bug.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -n -q " +${CLICKHOUSE_CLIENT} -q " DROP DICTIONARY IF EXISTS 02907_dictionary; DROP TABLE IF EXISTS 02907_table; diff --git a/tests/queries/0_stateless/02907_system_backups_profile_events.sh b/tests/queries/0_stateless/02907_system_backups_profile_events.sh index 801056a2844..9a1d5a3db11 100755 --- a/tests/queries/0_stateless/02907_system_backups_profile_events.sh +++ b/tests/queries/0_stateless/02907_system_backups_profile_events.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " drop table if exists test; create table test (a Int32) engine = MergeTree() order by tuple(); " @@ -12,10 +12,10 @@ create table test (a Int32) engine = MergeTree() order by tuple(); backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME} backup_name="Disk('backups', '$backup_id')"; -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " backup table ${CLICKHOUSE_DATABASE}.test to $backup_name; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " select ProfileEvents['BackupEntriesCollectorMicroseconds'] > 10 from system.backups where name='Disk(\'backups\', \'$backup_id\')' " diff --git a/tests/queries/0_stateless/02908_Npy_files_caching.sh b/tests/queries/0_stateless/02908_Npy_files_caching.sh index 4845f740972..218e13efb95 100755 --- a/tests/queries/0_stateless/02908_Npy_files_caching.sh +++ b/tests/queries/0_stateless/02908_Npy_files_caching.sh @@ -7,13 +7,13 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=0" $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy') settings optimize_count_from_files=1" $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/one_dim.npy', auto, 'array Int64') settings optimize_count_from_files=1" -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " desc file('$CURDIR/data_npy/one_dim.npy'); select number_of_rows from system.schema_inference_cache where format='Npy'; " $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=0" $CLICKHOUSE_LOCAL -q "select count() from file('$CURDIR/data_npy/npy_big.npy') settings optimize_count_from_files=1" -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " desc file('$CURDIR/data_npy/npy_big.npy'); select number_of_rows from system.schema_inference_cache where format='Npy'; " diff --git a/tests/queries/0_stateless/02908_table_ttl_dependency.sh b/tests/queries/0_stateless/02908_table_ttl_dependency.sh index 70136b4a42b..0bc02426f61 100755 --- a/tests/queries/0_stateless/02908_table_ttl_dependency.sh +++ b/tests/queries/0_stateless/02908_table_ttl_dependency.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS 02908_dependent; DROP TABLE IF EXISTS 02908_main; @@ -14,11 +14,11 @@ $CLICKHOUSE_CLIENT -nm -q " CREATE TABLE 02908_dependent (a UInt32, ts DateTime) ENGINE = MergeTree ORDER BY a TTL ts + 1 WHERE a IN (SELECT a FROM ${CLICKHOUSE_DATABASE}.02908_main); " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE 02908_main; " 2>&1 | grep -F -q "HAVE_DEPENDENT_OBJECTS" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE 02908_dependent; DROP TABLE 02908_main; " diff --git a/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh b/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh index 8da144f90ca..75d491642ea 100755 --- a/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh +++ b/tests/queries/0_stateless/02909_settings_in_json_schema_cache.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CUR_DIR"/../shell_config.sh echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS schema_inference_make_columns_nullable=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS schema_inference_make_columns_nullable=0; SELECT count() from system.schema_inference_cache where format = 'JSON' and additional_format_info like '%schema_inference_make_columns_nullable%';" diff --git a/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh b/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh index 80e38338751..7ad38e11e96 100755 --- a/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh +++ b/tests/queries/0_stateless/02915_input_table_function_in_subquery.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " CREATE TABLE IF NOT EXISTS ts_data_double_raw ( device_id UInt32 NOT NULL CODEC(ZSTD), diff --git a/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh b/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh index 5f0f41a956b..b6d6ca57768 100755 --- a/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh +++ b/tests/queries/0_stateless/02915_lazy_loading_of_base_backups.sh @@ -13,40 +13,40 @@ b_backup="Disk('backups', '$b_backup_id')" c_backup_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_c c_backup="Disk('backups', '$c_backup_id')" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS tbl1; DROP TABLE IF EXISTS tbl2; DROP TABLE IF EXISTS tbl3; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE TABLE tbl1 (a Int32) ENGINE = MergeTree() ORDER BY tuple(); " # The following BACKUP command must write backup 'a'. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $a_backup SETTINGS id='$a_backup_id'; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE TABLE tbl2 (a Int32) ENGINE = MergeTree() ORDER BY tuple(); " # The following BACKUP command must read backup 'a' and write backup 'b'. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $b_backup SETTINGS id='$b_backup_id', base_backup=$a_backup; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE TABLE tbl3 (a Int32) ENGINE = MergeTree() ORDER BY tuple(); " # The following BACKUP command must read only backup 'b' (and not 'a') and write backup 'c'. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " BACKUP DATABASE ${CLICKHOUSE_DATABASE} TO $c_backup SETTINGS id='$c_backup_id', base_backup=$b_backup; " | grep -o "BACKUP_CREATED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE tbl1; DROP TABLE tbl2; DROP TABLE tbl3; @@ -57,28 +57,28 @@ r2_restore_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_r2 r3_restore_id=${CLICKHOUSE_TEST_UNIQUE_NAME}_r3 # The following RESTORE command must read all 3 backups 'a', 'b', c' because the table 'tbl1' was in the first backup. 
-${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl1 FROM $c_backup SETTINGS id='$r1_restore_id'; " | grep -o "RESTORED" # The following RESTORE command must read only 2 backups 'b', c' (and not 'a') because the table 'tbl2' was in the second backup. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl2 FROM $c_backup SETTINGS id='$r2_restore_id'; " | grep -o "RESTORED" # The following RESTORE command must read only 1 backup 'c' (and not 'a' or 'b') because the table 'tbl3' was in the third backup. -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " RESTORE TABLE ${CLICKHOUSE_DATABASE}.tbl3 FROM $c_backup SETTINGS id='$r3_restore_id'; " | grep -o "RESTORED" all_ids="['$a_backup_id', '$b_backup_id', '$c_backup_id', '$r1_restore_id', '$r2_restore_id', '$r3_restore_id']" id_prefix_len=`expr "${CLICKHOUSE_TEST_UNIQUE_NAME}_" : '.*'` -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " SELECT substr(id, 1 + $id_prefix_len) as short_id, ProfileEvents['BackupsOpenedForRead'], ProfileEvents['BackupsOpenedForWrite'] FROM system.backups WHERE id IN ${all_ids} ORDER BY short_id " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE tbl1; DROP TABLE tbl2; DROP TABLE tbl3; diff --git a/tests/queries/0_stateless/02916_dictionary_access.sh b/tests/queries/0_stateless/02916_dictionary_access.sh index 08ee517ab3b..be62cc027ef 100755 --- a/tests/queries/0_stateless/02916_dictionary_access.sh +++ b/tests/queries/0_stateless/02916_dictionary_access.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}" dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE DICTIONARY IF NOT EXISTS ${dictname} ( id UInt64, @@ -23,15 +23,15 @@ ${CLICKHOUSE_CLIENT} -nm --query " SELECT dictGet(${dictname}, 'value', 1); " -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT * FROM dictionary(${dictname}); " 2>&1 | grep -o ACCESS_DENIED | uniq -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT dictGet(${dictname}, 'value', 1); " 2>&1 | grep -o ACCESS_DENIED | uniq -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP DICTIONARY IF EXISTS ${dictname}; DROP USER IF EXISTS ${username}; " diff --git a/tests/queries/0_stateless/02916_joinget_dependency.sh b/tests/queries/0_stateless/02916_joinget_dependency.sh index 6477ae8c967..ff9332cb57f 100755 --- a/tests/queries/0_stateless/02916_joinget_dependency.sh +++ b/tests/queries/0_stateless/02916_joinget_dependency.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # We test the dependency on the DROP -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE IF EXISTS Sub_distributed; DROP TABLE IF EXISTS Sub; DROP TABLE IF EXISTS Mapping; @@ -20,8 +20,8 @@ $CLICKHOUSE_CLIENT -q " DROP TABLE Mapping; " 2>&1 | grep -cm1 "HAVE_DEPENDENT_OBJECTS" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " DROP TABLE Sub_distributed; DROP TABLE Sub; DROP TABLE Mapping; -" \ No newline at end of file +" diff --git a/tests/queries/0_stateless/02930_client_file_log_comment.sh b/tests/queries/0_stateless/02930_client_file_log_comment.sh index 50cd587e4b5..393bffcaf59 100755 --- 
a/tests/queries/0_stateless/02930_client_file_log_comment.sh +++ b/tests/queries/0_stateless/02930_client_file_log_comment.sh @@ -14,7 +14,7 @@ echo -n 'select 4242' >> "$file2" $CLICKHOUSE_CLIENT --queries-file "$file1" "$file2" <<<'select 42' $CLICKHOUSE_CLIENT --log_comment foo --queries-file /dev/stdin <<<'select 424242' -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " system flush logs; select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 42' and type != 'QueryStart'; select query, log_comment from system.query_log where current_database = '$CLICKHOUSE_DATABASE' and event_date >= yesterday() and query = 'select 4242' and type != 'QueryStart'; diff --git a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh index d9e4a2c8f8b..76ab56a4570 100755 --- a/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh +++ b/tests/queries/0_stateless/02931_size_virtual_column_use_structure_from_insertion_table.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " create table test (x UInt64, y UInt32, size UInt64) engine=Memory; insert into test select c1, c2, _size from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') settings use_structure_from_insertion_table_in_table_functions=1; select * from test; diff --git a/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh index 0e23bb6c42b..9d1faf301d3 100755 --- a/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh +++ b/tests/queries/0_stateless/02940_system_stacktrace_optimizations.sh @@ -9,12 +9,12 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # no message at all echo "thread = 0" -$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to' +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_id = 0" |& grep -F -o 'Send signal to' # send messages to some threads echo "thread != 0" -$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)' +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_id != 0 format Null" |& grep -F -o 'Send signal to' | grep -v 'Send signal to 0 threads (total)' # there is no thread with comm="foo", so no signals will be sent echo "thread_name = 'foo'" -$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -nm -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)' +$CLICKHOUSE_CLIENT --allow_repeated_settings --send_logs_level=test -m -q "select * from system.stack_trace where thread_name = 'foo' format Null" |& grep -F -o 'Send signal to 0 threads (total)' diff --git a/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh b/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh index 
27950866e81..44af2dbf26f 100755 --- a/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh +++ b/tests/queries/0_stateless/02943_rmt_alter_metadata_merge_checksum_mismatch.sh @@ -26,7 +26,7 @@ function wait_part() function restore_failpoints() { # restore entry error with failpoints (to avoid endless errors in logs) - $CLICKHOUSE_CLIENT -nm -q " + $CLICKHOUSE_CLIENT -m -q " system enable failpoint replicated_queue_unfail_entries; system sync replica $failed_replica; system disable failpoint replicated_queue_unfail_entries; @@ -34,7 +34,7 @@ function restore_failpoints() } trap restore_failpoints EXIT -$CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q " +$CLICKHOUSE_CLIENT -m --insert_keeper_fault_injection_probability=0 -q " drop table if exists data_r1; drop table if exists data_r2; @@ -45,7 +45,7 @@ $CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q " " # will fail ALTER_METADATA on one of replicas -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " system enable failpoint replicated_queue_fail_next_entry; alter table data_r1 drop index value_idx settings alter_sync=0; -- part all_0_0_0_1 @@ -80,7 +80,7 @@ fi # This will create MERGE_PARTS, on failed replica it will be fetched from source replica (since it does not have all parts to execute merge) $CLICKHOUSE_CLIENT -q "optimize table $success_replica final settings optimize_throw_if_noop=1, alter_sync=1" # part all_0_0_1_1 -$CLICKHOUSE_CLIENT -nm --insert_keeper_fault_injection_probability=0 -q " +$CLICKHOUSE_CLIENT -m --insert_keeper_fault_injection_probability=0 -q " insert into $success_replica (key) values (2); -- part all_2_2_0 -- Avoid 'Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet' system sync replica $success_replica pull; diff --git a/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh b/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh index 6cb184cb1fe..ec699d974d4 100755 --- a/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh +++ b/tests/queries/0_stateless/02947_merge_tree_index_table_3.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) user_name="${CLICKHOUSE_DATABASE}_test_user_02947" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_merge_tree_index; DROP USER IF EXISTS $user_name; @@ -44,7 +44,7 @@ $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT arr.size $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" $CLICKHOUSE_CLIENT --user "$user_name" --password "password" -q "SELECT b.mark FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true)" 2>&1 | grep -m1 -o "ACCESS_DENIED" || echo "OK" -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS t_merge_tree_index; DROP USER IF EXISTS $user_name; " diff --git a/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh b/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh index daa9c571a5d..d06aba8a4b6 100755 --- a/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh +++ b/tests/queries/0_stateless/02950_dictionary_ssd_cache_short_circuit.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n --query=" +$CLICKHOUSE_CLIENT --query=" CREATE TABLE source_table ( id UInt64, diff --git a/tests/queries/0_stateless/02950_distributed_initial_query_event.sh b/tests/queries/0_stateless/02950_distributed_initial_query_event.sh index 7f690a681c4..737d5c6b41e 100755 --- a/tests/queries/0_stateless/02950_distributed_initial_query_event.sh +++ b/tests/queries/0_stateless/02950_distributed_initial_query_event.sh @@ -6,7 +6,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh # CREATE TABLE local (x UInt8) Engine=Memory; # CREATE TABLE distributed ON CLUSTER cluster (p Date, i Int32) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), x) -$CLICKHOUSE_CLIENT -n -q " +$CLICKHOUSE_CLIENT -q " DROP TABLE IF EXISTS local; DROP TABLE IF EXISTS distributed; CREATE TABLE local (x UInt8) Engine=Memory; diff --git a/tests/queries/0_stateless/02974_backup_query_format_null.sh b/tests/queries/0_stateless/02974_backup_query_format_null.sh index ddba2f6de16..345a4f47b20 100755 --- a/tests/queries/0_stateless/02974_backup_query_format_null.sh +++ b/tests/queries/0_stateless/02974_backup_query_format_null.sh @@ -4,7 +4,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . "$CUR_DIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS tbl; CREATE TABLE tbl (a Int32) ENGINE = MergeTree() ORDER BY tuple(); INSERT INTO tbl VALUES (2), (80), (-12345); @@ -14,7 +14,7 @@ backup_name="Disk('backups', '${CLICKHOUSE_TEST_UNIQUE_NAME}')" ${CLICKHOUSE_CLIENT} --query "BACKUP TABLE tbl TO ${backup_name} FORMAT Null" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE tbl; RESTORE ALL FROM ${backup_name} FORMAT Null " diff --git a/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh index e65c9654c9c..7ad5a2179f9 100755 --- a/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh +++ b/tests/queries/0_stateless/02981_insert_select_resize_to_max_insert_threads.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nq """ +${CLICKHOUSE_CLIENT} -q """ CREATE TABLE t1_local ( n UInt64, diff --git a/tests/queries/0_stateless/02995_index_10.sh b/tests/queries/0_stateless/02995_index_10.sh index 813cc49cbd8..e7e7d3c3b42 100755 --- a/tests/queries/0_stateless/02995_index_10.sh +++ b/tests/queries/0_stateless/02995_index_10.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS test; CREATE TABLE test (a String, b String, c String) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 11; @@ -37,8 +37,9 @@ WHERE a >= (round(pow(sipHash64(1, try), 1 / (3 + sipHash64(2, try) % 8))) AS a1 AND b <= (b1 + round(pow(sipHash64(7, try), 1 / (3 + sipHash64(8, try) % 8))))::String AND c >= (round(pow(sipHash64(9, try), 1 / (3 + sipHash64(10, try) % 8))) AS c1)::String AND c <= (c1 + round(pow(sipHash64(11, try), 1 / (3 + sipHash64(12, try) % 8))))::String -HAVING count() > 0; -" +HAVING count() > 0 +SETTINGS trace_profile_events=0 -- test is too slow with profiling +;" done | ${CLICKHOUSE_CLIENT} -${CLICKHOUSE_CLIENT} "DROP TABLE test" +${CLICKHOUSE_CLIENT} -q "DROP TABLE test" diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference index 46f24f73356..a70e70ef7e9 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.reference @@ -382,6 +382,253 @@ key1 e 5 5 5 key1 C 3 4 5 key2 a2 1 1 1 0 0 \N key4 f 2 3 4 key4 F 1 1 1 SET join_algorithm='hash'; +SELECT t1.*, t2.* FROM t1 LEFT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +key1 d 4 7 2 0 0 \N +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +SELECT t1.*, 
t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 d 4 7 2 key1 0 0 \N +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SELECT t1.*, t2.* FROM t1 RIGHT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SET join_algorithm='grace_hash'; +SELECT t1.*, t2.* FROM t1 LEFT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 
3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +key1 d 4 7 2 0 0 \N +key1 e 5 5 5 0 0 \N +key2 a2 1 1 1 0 0 \N +key4 f 2 3 4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 LEFT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 LEFT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT t1.*, t2.* from t1 LEFT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 d 4 7 2 key1 0 0 \N +key1 e 5 5 5 key1 0 0 \N +key2 a2 1 1 1 key2 0 0 \N +key4 f 2 3 4 key4 0 0 \N +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 LEFT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SELECT t1.*, t2.* FROM t1 RIGHT ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT 
ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT SEMI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 a 1 1 2 key1 C 3 4 5 +key1 a 1 1 2 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT SEMI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SELECT t1.*, t2.* FROM t1 RIGHT ANTI JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT t1.*, t2.* from t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); + 0 0 \N key1 A 1 2 1 + 0 0 \N key3 a3 1 1 1 + 0 0 \N key4 F 1 1 1 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 RIGHT ANTI JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +SET join_algorithm='hash'; +SELECT t1.* FROM t1 LEFT ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 b 2 3 2 +key1 c 3 2 1 +key1 d 4 7 2 +key1 e 5 5 5 +key2 a2 1 1 1 +key4 f 2 3 4 +SELECT t1.* FROM t1 LEFT SEMI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 b 2 3 2 +key1 c 3 2 1 +key1 d 4 7 2 +key2 a2 1 1 1 +key4 f 2 3 4 +SELECT t1.* FROM t1 LEFT ANTI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 e 5 5 5 +SELECT t1.* FROM t1 RIGHT ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +SELECT t1.* FROM t1 RIGHT SEMI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +key1 a 1 1 2 +SELECT t1.* FROM t1 RIGHT ANTI JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +SET join_algorithm='hash'; SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY (t1.key, t1.attr, t2.key, t2.attr); key1 a 1 1 2 key1 A 1 2 1 key1 a 1 1 2 key1 B 2 1 2 @@ -452,3 +699,46 @@ key2 a2 1 1 1 key1 A 1 2 1 key2 a2 1 1 1 key3 a3 1 1 1 key2 a2 1 1 1 key4 F 1 1 1 key4 f 2 3 4 key1 B 2 1 2 +SET join_algorithm='hash'; +SELECT t1.*, t2.* FROM t1 INNER ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, 
t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 INNER ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SET join_algorithm='grace_hash'; +SELECT t1.*, t2.* FROM t1 INNER ANY JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 C 3 4 5 +key1 d 4 7 2 key1 D 4 1 6 +key4 f 2 3 4 key4 F 1 1 1 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 A 1 2 1 +key1 b 2 3 2 key1 B 2 1 2 +key1 c 3 2 1 key1 B 2 1 2 +key1 d 4 7 2 key1 D 4 1 6 +SELECT t1.*, t2.* from t1 INNER ANY JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +key1 a 1 1 2 key1 B 2 1 2 +key1 b 2 3 2 key1 C 3 4 5 +key1 c 3 2 1 key1 D 4 1 6 +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 INNER ANY JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +1 1 1 1 1 1 +SET join_algorithm='hash'; +SELECT t1.* FROM t1 INNER ANY JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +key1 a 1 1 2 +key1 b 2 3 2 +key1 c 3 2 1 +key1 d 4 7 2 +key2 a2 1 1 1 diff --git a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 index 61ad5ec0bf1..b300881c562 100644 --- a/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 +++ b/tests/queries/0_stateless/03006_join_on_inequal_expression_fast.sql.j2 @@ -22,6 +22,26 @@ SELECT t1.*, t2.* FROM t1 {{ join_type }} JOIN t2 ON t1.key = t2.key AND (t1.a=2 {% endfor -%} {% endfor -%} +{% for algorithm in ['hash', 'grace_hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['LEFT', 'RIGHT'] -%} +{% for join_strictness in ['ANY', 'SEMI', 'ANTI'] -%} +SELECT t1.*, t2.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 {{ join_type }} {{ join_strictness }} JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +{% endfor -%} +{% endfor -%} +{% endfor -%} + +{% for algorithm in ['hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['LEFT', 'RIGHT'] -%} +{% for join_strictness in ['ANY', 'SEMI', 'ANTI'] -%} +SELECT t1.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +{% endfor -%} +{% endfor -%} +{% endfor -%} {% for algorithm in ['hash'] -%} SET join_algorithm='{{ algorithm }}'; @@ -29,6 +49,28 @@ SET join_algorithm='{{ algorithm }}'; SELECT t1.*, t2.* FROM t1 
{{ join_type }} JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY (t1.key, t1.attr, t2.key, t2.attr); {% endfor -%} {% endfor -%} + +{% for algorithm in ['hash', 'grace_hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['INNER'] -%} +{% for join_strictness in ['ANY'] -%} +SELECT t1.*, t2.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON (t1.a < t2.a OR lower(t1.attr) == lower(t2.attr)) AND t1.key = t2.key ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.b + t2.b == t1.c + t2.c) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT t1.*, t2.* from t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key and (t1.a < t2.a) ORDER BY (t1.key, t1.attr, t2.key, t2.attr); +SELECT * FROM (SELECT 1 AS a, 1 AS b, 1 AS c) AS t1 {{ join_type }} {{ join_strictness }} JOIN (SELECT 1 AS a, 1 AS b, 1 AS c) AS t2 ON t1.a = t2.a AND (t1.b > 0 OR t2.b > 0); +{% endfor -%} +{% endfor -%} +{% endfor -%} + +{% for algorithm in ['hash'] -%} +SET join_algorithm='{{ algorithm }}'; +{% for join_type in ['INNER'] -%} +{% for join_strictness in ['ANY'] -%} +SELECT t1.* FROM t1 {{ join_type }} {{ join_strictness }} JOIN t2 ON t1.key = t2.key AND t1.a < t2.a OR t1.a = t2.a ORDER BY ALL; +{% endfor -%} +{% endfor -%} +{% endfor -%} + -- { echoOff } -- test error messages diff --git a/tests/queries/0_stateless/03008_deduplication_random_setttings.sh b/tests/queries/0_stateless/03008_deduplication_random_setttings.sh index e9f59138177..07b99eb4e99 100755 --- a/tests/queries/0_stateless/03008_deduplication_random_setttings.sh +++ b/tests/queries/0_stateless/03008_deduplication_random_setttings.sh @@ -35,7 +35,7 @@ THIS_RUN+=" deduplicate_src_table=$deduplicate_src_table" THIS_RUN+=" deduplicate_dst_table=$deduplicate_dst_table" THIS_RUN+=" insert_unique_blocks=$insert_unique_blocks" -$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " +$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python insert_several_blocks_into_table \ --insert-method $insert_method \ --table-engine $engine \ @@ -48,7 +48,7 @@ $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " ) " 1>/dev/null 2>&1 && echo 'insert_several_blocks_into_table OK' || echo "FAIL: insert_several_blocks_into_table ${THIS_RUN}" -$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " +$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python mv_generates_several_blocks \ --insert-method $insert_method \ --table-engine $engine \ @@ -61,7 +61,7 @@ $CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " ) " 1>/dev/null 2>&1 && echo 'mv_generates_several_blocks OK' || echo "FAIL: mv_generates_several_blocks ${THIS_RUN}" -$CLICKHOUSE_CLIENT --max_insert_block_size 1 -nmq " +$CLICKHOUSE_CLIENT --max_insert_block_size 1 -mq " $(python3 $CURDIR/03008_deduplication.python several_mv_into_one_table \ --insert-method $insert_method \ --table-engine $engine \ diff --git a/tests/queries/0_stateless/03008_local_plain_rewritable.sh b/tests/queries/0_stateless/03008_local_plain_rewritable.sh index d51e180efc9..e61f9061297 100755 --- a/tests/queries/0_stateless/03008_local_plain_rewritable.sh +++ b/tests/queries/0_stateless/03008_local_plain_rewritable.sh @@ -8,7 +8,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) ${CLICKHOUSE_CLIENT} --query "drop table if exists 03008_test_local_mt sync" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " 
create table 03008_test_local_mt (a Int32, b Int64, c Int64) engine = MergeTree() partition by intDiv(a, 1000) order by tuple(a, b) settings disk = disk( @@ -19,35 +19,35 @@ settings disk = disk( path = '/var/lib/clickhouse/disks/local_plain_rewritable/') " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " insert into 03008_test_local_mt (*) values (1, 2, 0), (2, 2, 2), (3, 1, 9), (4, 7, 7), (5, 10, 2), (6, 12, 5); insert into 03008_test_local_mt (*) select number, number, number from numbers_mt(10000); " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " select count(*) from 03008_test_local_mt; select (*) from 03008_test_local_mt order by tuple(a, b) limit 10; " ${CLICKHOUSE_CLIENT} --query "optimize table 03008_test_local_mt final;" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " alter table 03008_test_local_mt modify setting disk = '03008_local_plain_rewritable', old_parts_lifetime = 3600; select engine_full from system.tables WHERE database = currentDatabase() AND name = '03008_test_local_mt'; " | grep -c "old_parts_lifetime = 3600" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " select count(*) from 03008_test_local_mt; select (*) from 03008_test_local_mt order by tuple(a, b) limit 10; " -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " alter table 03008_test_local_mt update c = 0 where a % 2 = 1; alter table 03008_test_local_mt add column d Int64 after c; alter table 03008_test_local_mt drop column c; " 2>&1 | grep -Fq "SUPPORT_IS_DISABLED" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " truncate table 03008_test_local_mt; select count(*) from 03008_test_local_mt; " diff --git a/tests/queries/0_stateless/03031_clickhouse_local_input.sh b/tests/queries/0_stateless/03031_clickhouse_local_input.sh index 6f59e9b9703..e2f9cf48108 100755 --- a/tests/queries/0_stateless/03031_clickhouse_local_input.sh +++ b/tests/queries/0_stateless/03031_clickhouse_local_input.sh @@ -6,15 +6,15 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) tmp_file="$CUR_DIR/$CLICKHOUSE_DATABASE.txt" echo '# foo' -$CLICKHOUSE_LOCAL --engine_file_truncate_on_insert=1 -n -q "insert into function file('$tmp_file', 'LineAsString', 'x String') select * from input('x String') format LineAsString" << d2 -[1,2,3] [1,2,3] 0 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 0 Array(Int64) Array(Int64) -[1,2,3] 42 0 Array(Int64) Int64 -[1,2,3] abc 0 Array(Int64) String -[1,2,3] \N 0 Array(Int64) None -[1,2,4] [1,2,3] 1 Array(Int64) Array(Int64) -42 [1,2,3] 1 Int64 Array(Int64) -42 42 0 Int64 Int64 -42 43 0 Int64 Int64 -42 abc 0 Int64 String -42 \N 0 Int64 None -43 42 1 Int64 Int64 -abc [1,2,3] 1 String Array(Int64) -abc 42 1 String Int64 -abc abc 0 String String -abc abd 0 String String -abc \N 0 String None -abd abc 1 String String -\N [1,2,3] 1 None Array(Int64) -\N 42 1 None Int64 -\N abc 1 None String -\N \N 0 None None +[1,2,3] [1,2,3] 0 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 0 Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 0 Array(Int64) true Date true +[1,2,3] 42 0 Array(Int64) true Int64 false +[1,2,3] abc 0 Array(Int64) true String false +[1,2,3] \N 0 Array(Int64) true None false +[1,2,4] [1,2,3] 1 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 1 Date true Array(Int64) true +2020-01-01 2020-01-01 0 Date true Date true +2020-01-01 2020-01-02 0 Date true Date true +2020-01-01 42 0 Date true Int64 false +2020-01-01 abc 0 Date true String false +2020-01-01 \N 0 
Date true None false +2020-01-02 2020-01-01 1 Date true Date true +42 [1,2,3] 1 Int64 false Array(Int64) true +42 2020-01-01 1 Int64 false Date true +42 42 0 Int64 false Int64 false +42 43 0 Int64 false Int64 false +42 abc 0 Int64 false String false +42 \N 0 Int64 false None false +43 42 1 Int64 false Int64 false +abc [1,2,3] 1 String false Array(Int64) true +abc 2020-01-01 1 String false Date true +abc 42 1 String false Int64 false +abc abc 0 String false String false +abc abd 0 String false String false +abc \N 0 String false None false +abd abc 1 String false String false +\N [1,2,3] 1 None false Array(Int64) true +\N 2020-01-01 1 None false Date true +\N 42 1 None false Int64 false +\N abc 1 None false String false +\N \N 0 None false None false d1 >= d2 -[1,2,3] [1,2,3] 1 Array(Int64) Array(Int64) -[1,2,3] [1,2,4] 1 Array(Int64) Array(Int64) -[1,2,3] 42 1 Array(Int64) Int64 -[1,2,3] abc 1 Array(Int64) String -[1,2,3] \N 1 Array(Int64) None -[1,2,4] [1,2,3] 1 Array(Int64) Array(Int64) -42 [1,2,3] 1 Int64 Array(Int64) -42 42 1 Int64 Int64 -42 43 1 Int64 Int64 -42 abc 1 Int64 String -42 \N 1 Int64 None -43 42 1 Int64 Int64 -abc [1,2,3] 1 String Array(Int64) -abc 42 1 String Int64 -abc abc 1 String String -abc abd 1 String String -abc \N 1 String None -abd abc 1 String String -\N [1,2,3] 1 None Array(Int64) -\N 42 1 None Int64 -\N abc 1 None String -\N \N 1 None None +[1,2,3] [1,2,3] 1 Array(Int64) true Array(Int64) true +[1,2,3] [1,2,4] 1 Array(Int64) true Array(Int64) true +[1,2,3] 2020-01-01 1 Array(Int64) true Date true +[1,2,3] 42 1 Array(Int64) true Int64 false +[1,2,3] abc 1 Array(Int64) true String false +[1,2,3] \N 1 Array(Int64) true None false +[1,2,4] [1,2,3] 1 Array(Int64) true Array(Int64) true +2020-01-01 [1,2,3] 1 Date true Array(Int64) true +2020-01-01 2020-01-01 1 Date true Date true +2020-01-01 2020-01-02 1 Date true Date true +2020-01-01 42 1 Date true Int64 false +2020-01-01 abc 1 Date true String false +2020-01-01 \N 1 Date true None false +2020-01-02 2020-01-01 1 Date true Date true +42 [1,2,3] 1 Int64 false Array(Int64) true +42 2020-01-01 1 Int64 false Date true +42 42 1 Int64 false Int64 false +42 43 1 Int64 false Int64 false +42 abc 1 Int64 false String false +42 \N 1 Int64 false None false +43 42 1 Int64 false Int64 false +abc [1,2,3] 1 String false Array(Int64) true +abc 2020-01-01 1 String false Date true +abc 42 1 String false Int64 false +abc abc 1 String false String false +abc abd 1 String false String false +abc \N 1 String false None false +abd abc 1 String false String false +\N [1,2,3] 1 None false Array(Int64) true +\N 2020-01-01 1 None false Date true +\N 42 1 None false Int64 false +\N abc 1 None false String false +\N \N 1 None false None false diff --git a/tests/queries/0_stateless/03035_dynamic_sorting.sql b/tests/queries/0_stateless/03035_dynamic_sorting.sql index 0487fafc955..e0039a348c6 100644 --- a/tests/queries/0_stateless/03035_dynamic_sorting.sql +++ b/tests/queries/0_stateless/03035_dynamic_sorting.sql @@ -1,80 +1,55 @@ set allow_experimental_dynamic_type = 1; drop table if exists test; -create table test (d1 Dynamic, d2 Dynamic) engine=Memory; - -insert into test values (42, 42); -insert into test values (42, 43); -insert into test values (43, 42); - -insert into test values ('abc', 'abc'); -insert into test values ('abc', 'abd'); -insert into test values ('abd', 'abc'); - -insert into test values ([1,2,3], [1,2,3]); -insert into test values ([1,2,3], [1,2,4]); -insert into test values ([1,2,4], [1,2,3]); - -insert into test values 
(NULL, NULL); - -insert into test values (42, 'abc'); -insert into test values ('abc', 42); - -insert into test values (42, [1,2,3]); -insert into test values ([1,2,3], 42); - -insert into test values (42, NULL); -insert into test values (NULL, 42); - -insert into test values ('abc', [1,2,3]); -insert into test values ([1,2,3], 'abc'); - -insert into test values ('abc', NULL); -insert into test values (NULL, 'abc'); - -insert into test values ([1,2,3], NULL); -insert into test values (NULL, [1,2,3]); +create table test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) engine=Memory; +insert into test values (42, 42), (42, 43), (43, 42), ('abc', 'abc'), ('abc', 'abd'), ('abd', 'abc'), +([1,2,3], [1,2,3]), ([1,2,3], [1,2,4]), ([1,2,4], [1,2,3]), +('2020-01-01', '2020-01-01'), ('2020-01-01', '2020-01-02'), ('2020-01-02', '2020-01-01'), +(NULL, NULL), (42, 'abc'), ('abc', 42), (42, [1,2,3]), ([1,2,3], 42), (42, NULL), (NULL, 42), +('abc', [1,2,3]), ([1,2,3], 'abc'), ('abc', NULL), (NULL, 'abc'), ([1,2,3], NULL), (NULL, [1,2,3]), +(42, '2020-01-01'), ('2020-01-01', 42), ('2020-01-01', 'abc'), ('abc', '2020-01-01'), +('2020-01-01', [1,2,3]), ([1,2,3], '2020-01-01'), ('2020-01-01', NULL), (NULL, '2020-01-01'); select 'order by d1 nulls first'; -select d1, dynamicType(d1) from test order by d1 nulls first; +select d1, dynamicType(d1), isDynamicElementInSharedData(d1) from test order by d1 nulls first; select 'order by d1 nulls last'; -select d1, dynamicType(d1) from test order by d1 nulls last; +select d1, dynamicType(d1), isDynamicElementInSharedData(d1) from test order by d1 nulls last; select 'order by d2 nulls first'; -select d2, dynamicType(d2) from test order by d2 nulls first; +select d2, dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2 nulls first; select 'order by d2 nulls last'; -select d2, dynamicType(d2) from test order by d2 nulls last; +select d2, dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2 nulls last; select 'order by d1, d2 nulls first'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2 nulls first; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2 nulls first; select 'order by d1, d2 nulls last'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2 nulls last; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2 nulls last; select 'order by d2, d1 nulls first'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d2, d1 nulls first; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2, d1 nulls first; select 'order by d2, d1 nulls last'; -select d1, d2, dynamicType(d1), dynamicType(d2) from test order by d2, d1 nulls last; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2, d1 nulls last; select 'd1 = d2'; -select d1, d2, d1 = d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 = d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 < d2'; -select d1, d2, d1 < d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 < d2, dynamicType(d1), isDynamicElementInSharedData(d1), 
dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 <= d2'; -select d1, d2, d1 <= d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 <= d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 > d2'; -select d1, d2, d1 > d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d1 > d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; select 'd1 >= d2'; -select d1, d2, d2 >= d2, dynamicType(d1), dynamicType(d2) from test order by d1, d2; +select d1, d2, d2 >= d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2; drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference new file mode 100644 index 00000000000..ca6c5dbba82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.reference @@ -0,0 +1,20 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +360000 +360000 +200000 +200000 +0 +0 +20000 +20000 +200000 +200000 +20000 +20000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql new file mode 100644 index 00000000000..bff28fb5c90 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_compact_merge_tree.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() 
from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`LowCardinality(String)`, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`LowCardinality(String)`, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`LowCardinality(String)`, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference new file mode 100644 index 00000000000..ca6c5dbba82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.reference @@ -0,0 +1,20 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +360000 +360000 +200000 +200000 +0 +0 +20000 +20000 +200000 +200000 +20000 +20000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql new file mode 100644 index 00000000000..4eed3d15529 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_memory.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=Memory; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 
== 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 new file mode 100644 index 00000000000..de12c6b8737 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.reference.j2 @@ -0,0 +1,2463 @@ +Memory +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String 
+UInt64 +36 +36 +20 +20 +0 +0 +2 +2 +20 +20 +2 +2 +20 +0 +0 0 \N [] +1 1 \N [] +2 2 \N [] +3 3 \N [] +4 4 \N [] +5 5 \N [] +6 6 \N [] +7 7 \N [] +8 8 \N [] +9 9 \N [] +str_10 \N str_10 [] +10 10 \N [] +10 10 \N [] +[[0,1]] \N \N [] +str_11 \N \N [] +str_11 \N str_11 [] +str_12 \N str_12 [] +12 12 \N [] +12 12 \N [] +str_13 \N str_13 [] +13 13 \N [] +13 13 \N [] +str_14 \N str_14 [] +14 14 \N [] +14 14 \N [] +str_15 \N str_15 [] +15 15 \N [] +15 15 \N [] +[[0,1,2,3,4,5,6]] \N \N [] +str_16 \N \N [] +str_16 \N str_16 [] +str_17 \N str_17 [] +17 17 \N [] +17 17 \N [] +str_18 \N str_18 [] +18 18 \N [] +18 18 \N [] +str_19 \N str_19 [] +19 19 \N [] +19 19 \N [] +[20] \N \N [20] +['str_21','str_21'] \N \N ['str_21','str_21'] +[22,22,22] \N \N [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +[40] \N \N [40] +41 41 \N [] +\N \N \N [] +str_43 \N str_43 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] +45 45 \N [] +\N \N \N [] +str_47 \N str_47 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 49 \N [] +\N \N \N [] +str_51 \N str_51 [] +[52,52,52] \N \N [52,52,52] +53 53 \N [] +\N \N \N [] +str_55 \N str_55 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] +57 57 \N [] +\N \N \N [] +str_59 \N str_59 [] +[60] \N \N [60] +61 61 \N [] +\N \N \N [] +str_63 \N str_63 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] +65 65 \N [] +\N \N \N [] +str_67 \N str_67 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] +69 69 \N [] +\N \N \N [] +str_71 \N str_71 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] +73 73 \N [] +\N \N \N [] +str_75 \N str_75 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] +77 77 \N [] +\N \N \N [] +str_79 \N str_79 [] +0 \N [] +1 \N [] +2 \N [] +3 \N [] +4 \N [] +5 \N [] +6 \N [] +7 \N [] +8 \N [] +9 \N [] +\N str_10 [] +10 \N [] +10 \N [] +\N \N [] +\N \N [] +\N str_11 [] +\N str_12 [] +12 \N [] +12 \N [] +\N str_13 [] +13 \N [] +13 \N [] +\N str_14 [] +14 \N [] +14 \N [] +\N str_15 [] +15 \N [] +15 \N [] +\N \N [] +\N \N [] +\N str_16 [] +\N str_17 [] +17 \N [] +17 \N [] +\N str_18 [] +18 \N [] +18 \N [] +\N str_19 [] +19 \N [] +19 \N [] +\N \N [20] +\N \N ['str_21','str_21'] +\N \N [22,22,22] +\N \N [23,23,23,23] +\N \N [24,24,24,24,24] +\N \N [25,25,25,25,25,25] +\N \N [26,26,26,26,26,26,26] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] +\N \N [29,29,29,29,29,29,29,29,29,29] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [40] +41 \N [] +\N \N [] +\N str_43 [] +\N \N [44,44,44,44,44] +45 \N [] +\N \N [] +\N str_47 [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] +\N \N [] +\N str_51 [] +\N \N [52,52,52] +53 \N [] +\N \N [] +\N str_55 [] +\N \N [56,56,56,56,56,56,56] +57 \N [] +\N \N [] +\N str_59 [] +\N \N [60] +61 \N [] +\N \N [] +\N str_63 [] +\N \N [64,64,64,64,64] +65 \N [] +\N \N [] +\N str_67 [] 
+\N \N [68,68,68,68,68,68,68,68,68] +69 \N [] +\N \N [] +\N str_71 [] +\N \N [NULL,NULL,NULL] +73 \N [] +\N \N [] +\N str_75 [] +\N \N [76,76,76,76,76,76,76] +77 \N [] +\N \N [] +\N str_79 [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +0 0 \N [] 0 [] +1 1 \N [] 0 [] +2 2 \N [] 0 [] +3 3 \N [] 0 [] +4 4 \N [] 0 [] +5 5 \N [] 0 [] +6 6 \N [] 0 [] +7 7 \N [] 0 [] +8 8 \N [] 0 [] +9 9 \N [] 0 [] +str_10 \N \N [] 0 [] +10 10 \N [] 0 [] +10 10 \N [] 0 [] +[[0,1]] \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_12 \N \N [] 0 [] +12 12 \N [] 0 [] +12 12 \N [] 0 [] +str_13 \N \N [] 0 [] +13 13 \N [] 0 [] +13 13 \N [] 0 [] +str_14 \N \N [] 0 [] +14 14 \N [] 0 [] +14 14 \N [] 0 [] +str_15 \N \N [] 0 [] +15 15 \N [] 0 [] +15 15 \N [] 0 [] +[[0,1,2,3,4,5,6]] \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_17 \N \N [] 0 [] +17 17 \N [] 0 [] +17 17 \N [] 0 [] +str_18 \N \N [] 0 [] +18 18 \N [] 0 [] +18 18 \N [] 0 [] +str_19 \N \N [] 0 [] +19 19 \N [] 0 [] +19 19 \N [] 0 [] +[20] \N \N [20] 1 [20] +['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] +[22,22,22] \N \N [22,22,22] 3 [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] 4 [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] 5 [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +[40] \N \N [40] 1 [40] +41 41 \N [] 0 [] +\N \N \N [] 0 [] +str_43 \N \N [] 0 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] 5 [44,44,44,44,44] +45 45 \N [] 0 [] +\N \N \N [] 0 [] +str_47 \N \N [] 0 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 49 \N [] 0 [] +\N \N \N [] 0 [] +str_51 \N \N [] 0 [] +[52,52,52] \N \N [52,52,52] 3 [52,52,52] +53 53 \N [] 0 [] +\N \N \N [] 0 [] +str_55 \N \N [] 0 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] +57 57 \N [] 0 [] +\N \N \N [] 0 
[] +str_59 \N \N [] 0 [] +[60] \N \N [60] 1 [60] +61 61 \N [] 0 [] +\N \N \N [] 0 [] +str_63 \N \N [] 0 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] 5 [64,64,64,64,64] +65 65 \N [] 0 [] +\N \N \N [] 0 [] +str_67 \N \N [] 0 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] +69 69 \N [] 0 [] +\N \N \N [] 0 [] +str_71 \N \N [] 0 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] +73 73 \N [] 0 [] +\N \N \N [] 0 [] +str_75 \N \N [] 0 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] +77 77 \N [] 0 [] +\N \N \N [] 0 [] +str_79 \N \N [] 0 [] +0 \N [] 0 [] [] +1 \N [] 0 [] [] +2 \N [] 0 [] [] +3 \N [] 0 [] [] +4 \N [] 0 [] [] +5 \N [] 0 [] [] +6 \N [] 0 [] [] +7 \N [] 0 [] [] +8 \N [] 0 [] [] +9 \N [] 0 [] [] +\N \N [] 0 [] [] +10 \N [] 0 [] [] +10 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +12 \N [] 0 [] [] +12 \N [] 0 [] [] +\N \N [] 0 [] [] +13 \N [] 0 [] [] +13 \N [] 0 [] [] +\N \N [] 0 [] [] +14 \N [] 0 [] [] +14 \N [] 0 [] [] +\N \N [] 0 [] [] +15 \N [] 0 [] [] +15 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +17 \N [] 0 [] [] +17 \N [] 0 [] [] +\N \N [] 0 [] [] +18 \N [] 0 [] [] +18 \N [] 0 [] [] +\N \N [] 0 [] [] +19 \N [] 0 [] [] +19 \N [] 0 [] [] +\N \N [20] 1 [20] [NULL] +\N \N ['str_21','str_21'] 2 [NULL,NULL] ['str_21','str_21'] +\N \N [22,22,22] 3 [22,22,22] [NULL,NULL,NULL] +\N \N [23,23,23,23] 4 [23,23,23,23] [NULL,NULL,NULL,NULL] +\N \N [24,24,24,24,24] 5 [24,24,24,24,24] [NULL,NULL,NULL,NULL,NULL] +\N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] [NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [40] 1 [40] [NULL] +41 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [44,44,44,44,44] 5 [44,44,44,44,44] [NULL,NULL,NULL,NULL,NULL] +45 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [52,52,52] 3 [52,52,52] [NULL,NULL,NULL] +53 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +57 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [60] 1 [60] [NULL] +61 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [64,64,64,64,64] 5 [64,64,64,64,64] [NULL,NULL,NULL,NULL,NULL] +65 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +69 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] [NULL,NULL,NULL] +73 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N 
\N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +77 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [20] +[] 0 [NULL,NULL] +[] 0 [22,22,22] +[] 0 [23,23,23,23] +[] 0 [24,24,24,24,24] +[] 0 [25,25,25,25,25,25] +[] 0 [26,26,26,26,26,26,26] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [28,28,28,28,28,28,28,28,28] +[] 0 [29,29,29,29,29,29,29,29,29,29] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [40] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [44,44,44,44,44] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [52,52,52] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [56,56,56,56,56,56,56] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [60] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [64,64,64,64,64] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [68,68,68,68,68,68,68,68,68] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [76,76,76,76,76,76,76] +[] 0 [] +[] 0 [] +[] 0 [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] 
+[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000 +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +36 +36 +20 +20 +0 +0 +2 +2 +20 +20 +2 +2 +20 +0 +0 0 \N [] +1 1 \N [] +2 2 \N [] +3 3 \N [] +4 4 \N [] +5 5 \N [] +6 6 \N [] +7 7 \N [] +8 8 \N [] +9 9 \N [] +str_10 \N str_10 [] +10 10 \N [] +10 10 \N [] +[[0,1]] \N \N [] +str_11 \N \N [] +str_11 \N str_11 [] +str_12 \N str_12 [] +12 12 \N [] +12 12 \N [] +str_13 \N str_13 [] +13 13 \N [] +13 13 \N [] +str_14 \N str_14 [] +14 14 \N [] +14 14 \N [] +str_15 \N str_15 [] +15 15 \N [] +15 15 \N [] +[[0,1,2,3,4,5,6]] \N \N [] +str_16 \N \N [] +str_16 \N str_16 [] +str_17 \N str_17 [] +17 17 \N [] +17 17 \N [] +str_18 \N str_18 [] +18 18 \N [] +18 18 \N [] +str_19 \N str_19 [] +19 19 \N [] +19 19 \N [] +[20] \N \N [20] +['str_21','str_21'] \N \N ['str_21','str_21'] +[22,22,22] \N \N [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +[40] \N \N [40] +41 41 \N [] +\N \N \N [] +str_43 \N str_43 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] +45 45 \N [] +\N \N \N [] +str_47 \N str_47 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 49 \N [] +\N \N \N [] +str_51 \N str_51 [] +[52,52,52] \N \N [52,52,52] +53 53 \N [] +\N \N \N [] +str_55 \N str_55 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] +57 57 \N [] +\N \N \N [] +str_59 \N str_59 [] +[60] \N \N [60] +61 61 \N [] +\N \N \N [] +str_63 \N str_63 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] +65 65 \N [] +\N \N \N [] +str_67 \N str_67 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] +69 69 \N [] +\N \N \N [] +str_71 \N str_71 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] +73 73 \N [] +\N \N \N [] +str_75 \N str_75 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] +77 77 \N [] +\N \N \N [] +str_79 \N str_79 [] +0 \N [] +1 \N [] +2 \N [] +3 \N [] +4 \N [] +5 \N [] +6 \N [] +7 \N [] +8 \N [] +9 \N [] +\N str_10 [] +10 \N [] +10 \N [] +\N \N [] +\N \N [] +\N str_11 [] +\N str_12 
[] +12 \N [] +12 \N [] +\N str_13 [] +13 \N [] +13 \N [] +\N str_14 [] +14 \N [] +14 \N [] +\N str_15 [] +15 \N [] +15 \N [] +\N \N [] +\N \N [] +\N str_16 [] +\N str_17 [] +17 \N [] +17 \N [] +\N str_18 [] +18 \N [] +18 \N [] +\N str_19 [] +19 \N [] +19 \N [] +\N \N [20] +\N \N ['str_21','str_21'] +\N \N [22,22,22] +\N \N [23,23,23,23] +\N \N [24,24,24,24,24] +\N \N [25,25,25,25,25,25] +\N \N [26,26,26,26,26,26,26] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] +\N \N [29,29,29,29,29,29,29,29,29,29] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [40] +41 \N [] +\N \N [] +\N str_43 [] +\N \N [44,44,44,44,44] +45 \N [] +\N \N [] +\N str_47 [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] +\N \N [] +\N str_51 [] +\N \N [52,52,52] +53 \N [] +\N \N [] +\N str_55 [] +\N \N [56,56,56,56,56,56,56] +57 \N [] +\N \N [] +\N str_59 [] +\N \N [60] +61 \N [] +\N \N [] +\N str_63 [] +\N \N [64,64,64,64,64] +65 \N [] +\N \N [] +\N str_67 [] +\N \N [68,68,68,68,68,68,68,68,68] +69 \N [] +\N \N [] +\N str_71 [] +\N \N [NULL,NULL,NULL] +73 \N [] +\N \N [] +\N str_75 [] +\N \N [76,76,76,76,76,76,76] +77 \N [] +\N \N [] +\N str_79 [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +0 0 \N [] 0 [] +1 1 \N [] 0 [] +2 2 \N [] 0 [] +3 3 \N [] 0 [] +4 4 \N [] 0 [] +5 5 \N [] 0 [] +6 6 \N [] 0 [] +7 7 \N [] 0 [] +8 8 \N [] 0 [] +9 9 \N [] 0 [] +str_10 \N \N [] 0 [] +10 10 \N [] 0 [] +10 10 \N [] 0 [] +[[0,1]] \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_12 \N \N [] 0 [] +12 12 \N [] 0 [] +12 12 \N [] 0 [] +str_13 \N \N [] 0 [] +13 13 \N [] 0 [] +13 13 \N [] 0 [] +str_14 \N \N [] 0 [] +14 14 \N [] 0 [] +14 14 \N [] 0 [] +str_15 \N \N [] 0 [] +15 15 \N [] 0 [] +15 15 \N [] 0 [] +[[0,1,2,3,4,5,6]] \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_17 \N \N [] 0 [] +17 17 \N [] 0 [] +17 17 \N [] 0 [] +str_18 \N \N [] 0 [] +18 18 \N [] 0 [] +18 18 \N [] 0 [] +str_19 \N \N [] 0 [] +19 19 \N [] 0 [] +19 19 \N [] 0 [] +[20] \N \N [20] 1 [20] +['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] +[22,22,22] \N \N [22,22,22] 3 [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] 4 [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] 5 [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 
[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +[40] \N \N [40] 1 [40] +41 41 \N [] 0 [] +\N \N \N [] 0 [] +str_43 \N \N [] 0 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] 5 [44,44,44,44,44] +45 45 \N [] 0 [] +\N \N \N [] 0 [] +str_47 \N \N [] 0 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 49 \N [] 0 [] +\N \N \N [] 0 [] +str_51 \N \N [] 0 [] +[52,52,52] \N \N [52,52,52] 3 [52,52,52] +53 53 \N [] 0 [] +\N \N \N [] 0 [] +str_55 \N \N [] 0 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] +57 57 \N [] 0 [] +\N \N \N [] 0 [] +str_59 \N \N [] 0 [] +[60] \N \N [60] 1 [60] +61 61 \N [] 0 [] +\N \N \N [] 0 [] +str_63 \N \N [] 0 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] 5 [64,64,64,64,64] +65 65 \N [] 0 [] +\N \N \N [] 0 [] +str_67 \N \N [] 0 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] +69 69 \N [] 0 [] +\N \N \N [] 0 [] +str_71 \N \N [] 0 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] +73 73 \N [] 0 [] +\N \N \N [] 0 [] +str_75 \N \N [] 0 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] +77 77 \N [] 0 [] +\N \N \N [] 0 [] +str_79 \N \N [] 0 [] +0 \N [] 0 [] [] +1 \N [] 0 [] [] +2 \N [] 0 [] [] +3 \N [] 0 [] [] +4 \N [] 0 [] [] +5 \N [] 0 [] [] +6 \N [] 0 [] [] +7 \N [] 0 [] [] +8 \N [] 0 [] [] +9 \N [] 0 [] [] +\N \N [] 0 [] [] +10 \N [] 0 [] [] +10 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +12 \N [] 0 [] [] +12 \N [] 0 [] [] +\N \N [] 0 [] [] +13 \N [] 0 [] [] +13 \N [] 0 [] [] +\N \N [] 0 [] [] +14 \N [] 0 [] [] +14 \N [] 0 [] [] +\N \N [] 0 [] [] +15 \N [] 0 [] [] +15 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +17 \N [] 0 [] [] +17 \N [] 0 [] [] +\N \N [] 0 [] [] +18 \N [] 0 [] [] +18 \N [] 0 [] [] +\N \N [] 0 [] [] +19 \N [] 0 [] [] +19 \N [] 0 [] [] +\N \N [20] 1 [20] [NULL] +\N \N ['str_21','str_21'] 2 [NULL,NULL] ['str_21','str_21'] +\N \N [22,22,22] 3 [22,22,22] [NULL,NULL,NULL] +\N \N [23,23,23,23] 4 [23,23,23,23] [NULL,NULL,NULL,NULL] +\N \N [24,24,24,24,24] 5 [24,24,24,24,24] [NULL,NULL,NULL,NULL,NULL] +\N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] [NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [40] 1 [40] [NULL] +41 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [44,44,44,44,44] 
5 [44,44,44,44,44] [NULL,NULL,NULL,NULL,NULL] +45 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [52,52,52] 3 [52,52,52] [NULL,NULL,NULL] +53 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +57 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [60] 1 [60] [NULL] +61 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [64,64,64,64,64] 5 [64,64,64,64,64] [NULL,NULL,NULL,NULL,NULL] +65 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +69 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] [NULL,NULL,NULL] +73 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +77 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] 
+[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [20] +[] 0 [NULL,NULL] +[] 0 [22,22,22] +[] 0 [23,23,23,23] +[] 0 [24,24,24,24,24] +[] 0 [25,25,25,25,25,25] +[] 0 [26,26,26,26,26,26,26] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [28,28,28,28,28,28,28,28,28] +[] 0 [29,29,29,29,29,29,29,29,29,29] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [40] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [44,44,44,44,44] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [52,52,52] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [56,56,56,56,56,56,56] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [60] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [64,64,64,64,64] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [68,68,68,68,68,68,68,68,68] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [76,76,76,76,76,76,76] +[] 0 [] +[] 0 [] +[] 0 [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1 +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +36 +36 +20 +20 +0 +0 +2 +2 +20 +20 +2 +2 +20 +0 +0 0 \N [] +1 1 \N [] +2 2 \N [] +3 3 \N [] +4 4 \N [] +5 5 \N [] +6 6 \N [] +7 7 \N [] +8 8 \N [] +9 9 \N [] +str_10 \N str_10 [] +10 10 \N [] +10 10 \N [] +[[0,1]] \N \N [] +str_11 \N \N [] +str_11 \N str_11 [] +str_12 \N str_12 [] +12 12 \N [] +12 12 \N [] +str_13 \N str_13 [] +13 13 \N [] +13 13 \N [] +str_14 \N str_14 [] +14 14 \N [] +14 14 \N [] +str_15 \N str_15 [] +15 15 \N [] +15 15 \N [] +[[0,1,2,3,4,5,6]] \N \N [] +str_16 \N \N [] +str_16 \N str_16 [] +str_17 \N str_17 [] +17 17 \N [] +17 17 \N [] +str_18 \N str_18 [] +18 18 \N [] +18 18 \N [] +str_19 \N str_19 [] +19 19 \N [] +19 19 \N [] +[20] \N \N [20] +['str_21','str_21'] \N \N ['str_21','str_21'] +[22,22,22] \N \N [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +\N \N \N [] +[40] \N \N [40] +41 41 \N [] +\N \N \N [] +str_43 \N str_43 [] 
+[44,44,44,44,44] \N \N [44,44,44,44,44] +45 45 \N [] +\N \N \N [] +str_47 \N str_47 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 49 \N [] +\N \N \N [] +str_51 \N str_51 [] +[52,52,52] \N \N [52,52,52] +53 53 \N [] +\N \N \N [] +str_55 \N str_55 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] +57 57 \N [] +\N \N \N [] +str_59 \N str_59 [] +[60] \N \N [60] +61 61 \N [] +\N \N \N [] +str_63 \N str_63 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] +65 65 \N [] +\N \N \N [] +str_67 \N str_67 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] +69 69 \N [] +\N \N \N [] +str_71 \N str_71 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] +73 73 \N [] +\N \N \N [] +str_75 \N str_75 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] +77 77 \N [] +\N \N \N [] +str_79 \N str_79 [] +0 \N [] +1 \N [] +2 \N [] +3 \N [] +4 \N [] +5 \N [] +6 \N [] +7 \N [] +8 \N [] +9 \N [] +\N str_10 [] +10 \N [] +10 \N [] +\N \N [] +\N \N [] +\N str_11 [] +\N str_12 [] +12 \N [] +12 \N [] +\N str_13 [] +13 \N [] +13 \N [] +\N str_14 [] +14 \N [] +14 \N [] +\N str_15 [] +15 \N [] +15 \N [] +\N \N [] +\N \N [] +\N str_16 [] +\N str_17 [] +17 \N [] +17 \N [] +\N str_18 [] +18 \N [] +18 \N [] +\N str_19 [] +19 \N [] +19 \N [] +\N \N [20] +\N \N ['str_21','str_21'] +\N \N [22,22,22] +\N \N [23,23,23,23] +\N \N [24,24,24,24,24] +\N \N [25,25,25,25,25,25] +\N \N [26,26,26,26,26,26,26] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] +\N \N [29,29,29,29,29,29,29,29,29,29] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [40] +41 \N [] +\N \N [] +\N str_43 [] +\N \N [44,44,44,44,44] +45 \N [] +\N \N [] +\N str_47 [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] +\N \N [] +\N str_51 [] +\N \N [52,52,52] +53 \N [] +\N \N [] +\N str_55 [] +\N \N [56,56,56,56,56,56,56] +57 \N [] +\N \N [] +\N str_59 [] +\N \N [60] +61 \N [] +\N \N [] +\N str_63 [] +\N \N [64,64,64,64,64] +65 \N [] +\N \N [] +\N str_67 [] +\N \N [68,68,68,68,68,68,68,68,68] +69 \N [] +\N \N [] +\N str_71 [] +\N \N [NULL,NULL,NULL] +73 \N [] +\N \N [] +\N str_75 [] +\N \N [76,76,76,76,76,76,76] +77 \N [] +\N \N [] +\N str_79 [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +\N \N [] +0 0 \N [] 0 [] +1 1 \N [] 0 [] +2 2 \N [] 0 [] +3 3 \N [] 0 [] +4 4 \N [] 0 [] +5 5 \N [] 0 [] +6 6 \N [] 0 [] +7 7 \N [] 0 [] +8 8 \N [] 0 [] +9 9 \N [] 0 [] 
+str_10 \N \N [] 0 [] +10 10 \N [] 0 [] +10 10 \N [] 0 [] +[[0,1]] \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_11 \N \N [] 0 [] +str_12 \N \N [] 0 [] +12 12 \N [] 0 [] +12 12 \N [] 0 [] +str_13 \N \N [] 0 [] +13 13 \N [] 0 [] +13 13 \N [] 0 [] +str_14 \N \N [] 0 [] +14 14 \N [] 0 [] +14 14 \N [] 0 [] +str_15 \N \N [] 0 [] +15 15 \N [] 0 [] +15 15 \N [] 0 [] +[[0,1,2,3,4,5,6]] \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_16 \N \N [] 0 [] +str_17 \N \N [] 0 [] +17 17 \N [] 0 [] +17 17 \N [] 0 [] +str_18 \N \N [] 0 [] +18 18 \N [] 0 [] +18 18 \N [] 0 [] +str_19 \N \N [] 0 [] +19 19 \N [] 0 [] +19 19 \N [] 0 [] +[20] \N \N [20] 1 [20] +['str_21','str_21'] \N \N ['str_21','str_21'] 2 [NULL,NULL] +[22,22,22] \N \N [22,22,22] 3 [22,22,22] +[23,23,23,23] \N \N [23,23,23,23] 4 [23,23,23,23] +[24,24,24,24,24] \N \N [24,24,24,24,24] 5 [24,24,24,24,24] +[25,25,25,25,25,25] \N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] +[26,26,26,26,26,26,26] \N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] \N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] \N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] \N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +\N \N \N [] 0 [] +[40] \N \N [40] 1 [40] +41 41 \N [] 0 [] +\N \N \N [] 0 [] +str_43 \N \N [] 0 [] +[44,44,44,44,44] \N \N [44,44,44,44,44] 5 [44,44,44,44,44] +45 45 \N [] 0 [] +\N \N \N [] 0 [] +str_47 \N \N [] 0 [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] \N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 49 \N [] 0 [] +\N \N \N [] 0 [] +str_51 \N \N [] 0 [] +[52,52,52] \N \N [52,52,52] 3 [52,52,52] +53 53 \N [] 0 [] +\N \N \N [] 0 [] +str_55 \N \N [] 0 [] +[56,56,56,56,56,56,56] \N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] +57 57 \N [] 0 [] +\N \N \N [] 0 [] +str_59 \N \N [] 0 [] +[60] \N \N [60] 1 [60] +61 61 \N [] 0 [] +\N \N \N [] 0 [] +str_63 \N \N [] 0 [] +[64,64,64,64,64] \N \N [64,64,64,64,64] 5 [64,64,64,64,64] +65 65 \N [] 0 [] +\N \N \N [] 0 [] +str_67 \N \N [] 0 [] +[68,68,68,68,68,68,68,68,68] \N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] +69 69 \N [] 0 [] +\N \N \N [] 0 [] +str_71 \N \N [] 0 [] +[NULL,NULL,NULL] \N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] +73 73 \N [] 0 [] +\N \N \N [] 0 [] +str_75 \N \N [] 0 [] +[76,76,76,76,76,76,76] \N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] +77 77 \N [] 0 [] +\N \N \N [] 0 [] +str_79 \N \N [] 0 [] +0 \N [] 0 [] [] +1 \N [] 0 [] [] +2 \N [] 0 [] [] +3 \N [] 0 [] [] +4 \N [] 0 [] [] +5 \N [] 0 [] [] +6 \N [] 0 [] [] +7 \N [] 0 [] [] +8 \N [] 0 [] [] +9 \N [] 0 [] [] +\N \N [] 0 [] [] +10 \N [] 0 [] [] +10 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +12 \N [] 0 [] [] +12 \N [] 0 [] [] +\N \N [] 0 [] [] +13 \N [] 0 [] [] +13 \N [] 0 [] [] +\N \N [] 0 [] [] +14 \N [] 0 [] [] +14 \N [] 0 [] [] +\N \N [] 0 [] [] +15 \N [] 0 [] [] +15 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +17 \N [] 0 [] [] +17 \N [] 0 [] [] +\N \N [] 0 [] [] +18 \N [] 0 [] [] +18 \N [] 0 [] [] +\N \N [] 0 [] [] +19 \N [] 0 [] [] +19 \N [] 0 [] [] +\N \N 
[20] 1 [20] [NULL] +\N \N ['str_21','str_21'] 2 [NULL,NULL] ['str_21','str_21'] +\N \N [22,22,22] 3 [22,22,22] [NULL,NULL,NULL] +\N \N [23,23,23,23] 4 [23,23,23,23] [NULL,NULL,NULL,NULL] +\N \N [24,24,24,24,24] 5 [24,24,24,24,24] [NULL,NULL,NULL,NULL,NULL] +\N \N [25,25,25,25,25,25] 6 [25,25,25,25,25,25] [NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [26,26,26,26,26,26,26] 7 [26,26,26,26,26,26,26] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 8 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [28,28,28,28,28,28,28,28,28] 9 [28,28,28,28,28,28,28,28,28] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [29,29,29,29,29,29,29,29,29,29] 10 [29,29,29,29,29,29,29,29,29,29] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [40] 1 [40] [NULL] +41 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [44,44,44,44,44] 5 [44,44,44,44,44] [NULL,NULL,NULL,NULL,NULL] +45 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 9 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] ['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] +49 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [52,52,52] 3 [52,52,52] [NULL,NULL,NULL] +53 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [56,56,56,56,56,56,56] 7 [56,56,56,56,56,56,56] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +57 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [60] 1 [60] [NULL] +61 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [64,64,64,64,64] 5 [64,64,64,64,64] [NULL,NULL,NULL,NULL,NULL] +65 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [68,68,68,68,68,68,68,68,68] 9 [68,68,68,68,68,68,68,68,68] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +69 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [NULL,NULL,NULL] 3 [NULL,NULL,NULL] [NULL,NULL,NULL] +73 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [76,76,76,76,76,76,76] 7 [76,76,76,76,76,76,76] [NULL,NULL,NULL,NULL,NULL,NULL,NULL] +77 \N [] 0 [] [] +\N \N [] 0 [] [] +\N \N [] 0 [] [] +0 0 [] [] +1 0 [] [] +2 0 [] [] +3 0 [] [] +4 0 [] [] +5 0 [] [] +6 0 [] [] +7 0 [] [] +8 0 [] [] +9 0 [] [] +str_10 0 [] [] +10 0 [] [] +10 0 [] [] +[[0,1]] 0 [] [] +str_11 0 [] [] +str_11 0 [] [] +str_12 0 [] [] +12 0 [] [] +12 0 [] [] +str_13 0 [] [] +13 0 [] [] +13 0 [] [] +str_14 0 [] [] +14 0 [] [] +14 0 [] [] +str_15 0 [] [] +15 0 [] [] +15 0 [] [] +[[0,1,2,3,4,5,6]] 0 [] [] +str_16 0 [] [] +str_16 0 [] [] +str_17 0 [] [] +17 0 [] [] +17 0 [] [] +str_18 0 [] [] +18 0 [] [] +18 0 [] [] +str_19 0 [] [] +19 0 [] [] +19 0 [] [] +[20] 0 [] [20] +['str_21','str_21'] 0 [] [NULL,NULL] +[22,22,22] 0 [] [22,22,22] +[23,23,23,23] 0 [] [23,23,23,23] +[24,24,24,24,24] 0 [] [24,24,24,24,24] +[25,25,25,25,25,25] 0 [] [25,25,25,25,25,25] +[26,26,26,26,26,26,26] 0 [] [26,26,26,26,26,26,26] +[NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[28,28,28,28,28,28,28,28,28] 0 [] [28,28,28,28,28,28,28,28,28] +[29,29,29,29,29,29,29,29,29,29] 0 [] [29,29,29,29,29,29,29,29,29,29] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +\N 0 [] [] +[40] 0 [] [40] +41 0 [] [] +\N 0 [] [] +str_43 0 [] [] +[44,44,44,44,44] 
0 [] [44,44,44,44,44] +45 0 [] [] +\N 0 [] [] +str_47 0 [] [] +['str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48','str_48'] 0 [] [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +49 0 [] [] +\N 0 [] [] +str_51 0 [] [] +[52,52,52] 0 [] [52,52,52] +53 0 [] [] +\N 0 [] [] +str_55 0 [] [] +[56,56,56,56,56,56,56] 0 [] [56,56,56,56,56,56,56] +57 0 [] [] +\N 0 [] [] +str_59 0 [] [] +[60] 0 [] [60] +61 0 [] [] +\N 0 [] [] +str_63 0 [] [] +[64,64,64,64,64] 0 [] [64,64,64,64,64] +65 0 [] [] +\N 0 [] [] +str_67 0 [] [] +[68,68,68,68,68,68,68,68,68] 0 [] [68,68,68,68,68,68,68,68,68] +69 0 [] [] +\N 0 [] [] +str_71 0 [] [] +[NULL,NULL,NULL] 0 [] [NULL,NULL,NULL] +73 0 [] [] +\N 0 [] [] +str_75 0 [] [] +[76,76,76,76,76,76,76] 0 [] [76,76,76,76,76,76,76] +77 0 [] [] +\N 0 [] [] +str_79 0 [] [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [20] +[] 0 [NULL,NULL] +[] 0 [22,22,22] +[] 0 [23,23,23,23] +[] 0 [24,24,24,24,24] +[] 0 [25,25,25,25,25,25] +[] 0 [26,26,26,26,26,26,26] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [28,28,28,28,28,28,28,28,28] +[] 0 [29,29,29,29,29,29,29,29,29,29] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [40] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [44,44,44,44,44] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [52,52,52] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [56,56,56,56,56,56,56] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [60] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [64,64,64,64,64] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [68,68,68,68,68,68,68,68,68] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [NULL,NULL,NULL] +[] 0 [] +[] 0 [] +[] 0 [] +[] 0 [76,76,76,76,76,76,76] +[] 0 [] +[] 0 [] +[] 0 [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[2] [[0,1]] [[[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[7] [[0,1,2,3,4,5,6]] [[[],[],[],[],[],[],[]]] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] +[] [] [] diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 new file mode 100644 index 00000000000..dde4f3f53c3 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_small.sql.j2 @@ -0,0 +1,47 @@ +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + 
+drop table if exists test; + +{% for engine in ['Memory', 'MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1'] -%} + +select '{{ engine }}'; +create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; + +insert into test select number, number from numbers(10); +insert into test select number, 'str_' || toString(number) from numbers(10, 10); +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(20, 10); +insert into test select number, NULL from numbers(30, 10); +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(40, 40); +insert into test select number, if(number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(10, 10); +insert into test select number, if(number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(10, 10); + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id, d; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test order by id, d; +select d.Int8, d.Date, d.`Array(String)` from test order by id, d; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test order by id, d; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test order by id, d; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test order by id, d; + +drop table test; + +{% endfor -%} diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference 
b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference new file mode 100644 index 00000000000..ca6c5dbba82 --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.reference @@ -0,0 +1,20 @@ +Array(Array(Dynamic)) +Array(Variant(String, UInt64)) +LowCardinality(String) +None +String +UInt64 +360000 +360000 +200000 +200000 +0 +0 +20000 +20000 +200000 +200000 +20000 +20000 +200000 +0 diff --git a/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql new file mode 100644 index 00000000000..61dc8fca01a --- /dev/null +++ b/tests/queries/0_stateless/03036_dynamic_read_shared_subcolumns_wide_merge_tree.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select 
d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git 
a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql index d2c787040e5..fb23e15738e 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,22 +12,37 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), 
isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql index f99bf771608..c098a3191e0 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_horizontal_compact_wide_tree.sql @@ -2,7 +2,7 @@ set 
allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,22 +12,36 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges 
test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql index be81596d043..17b1e451143 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_compact_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, 
lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,23 +12,36 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); -system start merges test; -optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; optimize table test final;; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git 
a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference index d0d777a5a38..b0be05f07a2 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.reference @@ -1,28 +1,66 @@ -50000 DateTime -60000 Date -70000 Array(UInt16) -80000 String -100000 None -100000 UInt64 -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -70000 Array(UInt16) -100000 None -100000 UInt64 -190000 String -200000 Map(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -10000 Tuple(UInt64, UInt64) -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -260000 String -100000 None -100000 UInt64 -200000 Map(UInt64, UInt64) -270000 String +50000 DateTime false +60000 Date false +70000 Array(UInt16) false +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String false +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +30000 String false +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +80000 String true +100000 None false +100000 UInt64 false +200000 Map(UInt64, UInt64) false +--------------------- +10000 Tuple(UInt64, UInt64) true +50000 DateTime true +60000 Date true +70000 Array(UInt16) true +100000 None false +100000 UInt64 true +110000 String false +200000 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql index f6396af42a8..fd6c0109263 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql +++ b/tests/queries/0_stateless/03037_dynamic_merges_1_vertical_wide_merge_tree.sql @@ -2,7 +2,7 @@ set allow_experimental_dynamic_type=1; drop table if exists test; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, 
index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); @@ -12,22 +12,36 @@ insert into test select number, toDate(number) from numbers(60000); insert into test select number, toDateTime(number) from numbers(50000); insert into test select number, NULL from numbers(100000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(200000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(10000); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(30000); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 b/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 index 96a854630ed..7d3bc371e36 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 +++ b/tests/queries/0_stateless/03037_dynamic_merges_small.reference.j2 @@ -1,112 +1,264 @@ -5 DateTime -6 Date -7 Array(UInt16) 
-8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String -5 DateTime -6 Date -7 Array(UInt16) -8 String -10 None -10 UInt64 -7 Array(UInt16) -10 None -10 UInt64 -19 String -7 Array(UInt16) -10 None -10 UInt64 -19 String -20 Map(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -1 Tuple(UInt64, UInt64) -10 None -10 UInt64 -20 Map(UInt64, UInt64) -26 String -10 None -10 UInt64 -20 Map(UInt64, UInt64) -27 String +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 
Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false +5 DateTime false +6 Date false +7 Array(UInt16) false +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String false +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +3 String false +5 DateTime true +6 Date true +7 Array(UInt16) true +8 String true +10 None false +10 UInt64 false +20 Map(UInt64, UInt64) false +--------------------- +1 Tuple(UInt64, UInt64) true +5 DateTime true +6 Date true +7 Array(UInt16) true +10 None false +10 UInt64 true +11 String false +20 Map(UInt64, UInt64) false diff --git a/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 b/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 index 263e92be403..3778399d0a4 100644 --- a/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 +++ b/tests/queries/0_stateless/03037_dynamic_merges_small.sql.j2 @@ -9,7 +9,7 @@ drop table if exists test; 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, 
vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1'] -%} -create table test (id UInt64, d Dynamic(max_types=3)) engine={{ engine }}; +create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; system stop merges test; insert into test select number, number from numbers(10); @@ -19,23 +19,37 @@ insert into test select number, toDate(number) from numbers(6); insert into test select number, toDateTime(number) from numbers(5); insert into test select number, NULL from numbers(10); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final;; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, map(number, number) from numbers(20); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system stop merges test; insert into test select number, tuple(number, number) from numbers(1); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); system start merges test; optimize table test final; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); + +system stop merges test; +insert into test select number, 'str_' || number from numbers(3); +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference +++ 
b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a 
Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql index 81888946681..8ba192cb5db 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_horizontal.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by 
type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true 
+13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql index ba58ca471a2..1ea7eefdd53 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_compact_vertical.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || 
toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 
b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 index ae07c164074..3d7e8b60f73 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.reference.j2 @@ -1,84 +1,256 @@ -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None -2 Tuple(a Dynamic(max_types=3)):Date -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):String -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 UInt64:None -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 UInt64:None -2 Tuple(a Dynamic(max_types=3)):DateTime -3 Tuple(a Dynamic(max_types=3)):Array(UInt8) -5 Tuple(a Dynamic(max_types=3)):UInt64 -7 Tuple(a Dynamic(max_types=3)):String -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -13 Tuple(a Dynamic(max_types=3)):None -5 Tuple(a Dynamic(max_types=3)):UInt64 -10 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -10 UInt64:None -12 Tuple(a Dynamic(max_types=3)):String -13 Tuple(a Dynamic(max_types=3)):None +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a 
Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +14 Tuple(a Dynamic(max_types=2)):DateTime true +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false 
+--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +14 Tuple(a Dynamic(max_types=2)):DateTime true +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a 
Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String true +14 Tuple(a Dynamic(max_types=2)):DateTime false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +test +2 Tuple(a Dynamic(max_types=2)):DateTime false +2 Tuple(a Dynamic(max_types=2)):IPv4 false +4 Tuple(a Dynamic(max_types=2)):Date false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):DateTime true +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +4 Tuple(a Dynamic(max_types=2)):DateTime false +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 false +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +6 Tuple(a Dynamic(max_types=2)):DateTime true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +8 Tuple(a Dynamic(max_types=2)):DateTime false +13 Tuple(a Dynamic(max_types=2)):String false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false +--------------------- +2 Tuple(a Dynamic(max_types=2)):IPv4 true +4 Tuple(a Dynamic(max_types=2)):Date true +6 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +7 Tuple(a Dynamic(max_types=2)):UInt64 true +8 String:None false +13 Tuple(a Dynamic(max_types=2)):String true +14 Tuple(a Dynamic(max_types=2)):DateTime false +20 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +26 Tuple(a Dynamic(max_types=2)):None false +28 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 index 7828c2af49c..8682b6cef81 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_small.sql.j2 @@ -10,25 +10,41 @@ drop table if exists test; 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1', 'MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1'] 
-%} -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; +select 'test'; +create table test (id UInt64, d Dynamic(max_types=2)) engine={{ engine }}; system stop merges test; -insert into test select number, number from numbers(10); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(10); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(5); +insert into test select number, number from numbers(20); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(20); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(10); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(20); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(5); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(20); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(10); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(40); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag 
from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(8); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a 
Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql index a53c5b0b2a5..c6a09036c30 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_horizontal.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || 
dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference index 4be740f6050..a4c2df74a74 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.reference @@ -1,21 +1,63 @@ -16667 Tuple(a Dynamic(max_types=3)):Date -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):String -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 UInt64:None -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 UInt64:None -16667 Tuple(a Dynamic(max_types=3)):DateTime -33333 Tuple(a Dynamic(max_types=3)):Array(UInt8) -50000 Tuple(a Dynamic(max_types=3)):UInt64 -66667 Tuple(a Dynamic(max_types=3)):String -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None 
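Note on the reference changes in this group of tests: they appear to reflect the reworked Dynamic(max_types=N) behaviour, where N now bounds how many types keep dedicated subcolumns within a data part and the remaining values are tracked in a shared-data substream, which isDynamicElementInSharedData reports. A minimal sketch of that observable effect, assuming a server with the experimental Dynamic type enabled (the table name `sketch` is illustrative and not part of these tests; exact counts and which type is demoted may differ):

set allow_experimental_dynamic_type = 1;
create table sketch (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id;
system stop merges sketch;
insert into sketch select number, number from numbers(10);                      -- UInt64 values
insert into sketch select number, 'str_' || toString(number) from numbers(10);  -- String values
insert into sketch select number, toDate(number) from numbers(5);               -- Date values, a third distinct type
-- before merging, each part holds a single type, so nothing is expected in shared data
select count(), dynamicType(d) as t, isDynamicElementInSharedData(d) as in_shared from sketch group by t, in_shared order by t;
system start merges sketch;
optimize table sketch final;
-- after the merge only two types should keep dedicated subcolumns; the rest should report in_shared as true
select count(), dynamicType(d) as t, isDynamicElementInSharedData(d) as in_shared from sketch group by t, in_shared order by t;
drop table sketch;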
-133333 Tuple(a Dynamic(max_types=3)):None -50000 Tuple(a Dynamic(max_types=3)):UInt64 -100000 Tuple(a Dynamic(max_types=3)):Tuple(UInt64) -100000 UInt64:None -116667 Tuple(a Dynamic(max_types=3)):String -133333 Tuple(a Dynamic(max_types=3)):None +6667 Tuple(a Dynamic(max_types=2)):DateTime false +13333 Tuple(a Dynamic(max_types=2)):IPv4 false +16667 Tuple(a Dynamic(max_types=2)):Date false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) false +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +140000 UInt64:None false +--------------------- +6667 Tuple(a Dynamic(max_types=2)):DateTime true +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +16667 Tuple(a Dynamic(max_types=2)):DateTime false +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 false +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +23334 Tuple(a Dynamic(max_types=2)):DateTime true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +40000 Tuple(a Dynamic(max_types=2)):DateTime false +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false +--------------------- +13333 Tuple(a Dynamic(max_types=2)):IPv4 true +16667 Tuple(a Dynamic(max_types=2)):Date true +33333 Tuple(a Dynamic(max_types=2)):Array(UInt8) true +33334 Tuple(a Dynamic(max_types=2)):UInt64 true +40000 String:None false +63334 Tuple(a Dynamic(max_types=2)):DateTime true +66666 Tuple(a Dynamic(max_types=2)):String false +100000 Tuple(a Dynamic(max_types=2)):Tuple(UInt64) false +133333 Tuple(a Dynamic(max_types=2)):None false +140000 UInt64:None false diff --git a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql index 4256b010ec0..c1964c45d98 100644 --- a/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql +++ b/tests/queries/0_stateless/03038_nested_dynamic_merges_wide_vertical.sql @@ -6,24 +6,39 @@ set allow_experimental_dynamic_type = 1; set enable_named_columns_in_function_tuple = 0; drop table if exists test;; -create table test (id UInt64, d Dynamic(max_types=3)) 
engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600; system stop merges test; insert into test select number, number from numbers(100000); -insert into test select number, tuple(if(number % 2 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=3)) from numbers(100000); -insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=3)) from numbers(50000); +insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000); +insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; -insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(50000); -insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=3)) from numbers(200000); +system stop merges test; +insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000); +insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000); -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; system start merges test; optimize table test final; -select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=3))`.a) as type from test group by type order by count(), type; +select '---------------------'; +select count(), dynamicType(d) || ':' || 
dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; + +system stop merges test; +insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000); + +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; +system start merges test; +optimize table test final; +select '---------------------'; +select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type; drop table test; diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference index 2ec301b747b..9386548c74d 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.reference @@ -1,13 +1,13 @@ initial insert alter add column 1 -3 None +3 None false 0 0 \N \N \N 0 1 1 \N \N \N 0 2 2 \N \N \N 0 insert after alter add column 1 -4 String -4 UInt64 -7 None +4 String false +4 UInt64 false +7 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 @@ -24,147 +24,158 @@ insert after alter add column 1 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 alter modify column 1 -7 None -8 String +4 String true +4 UInt64 true +7 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 insert after alter modify column 1 -8 None -11 String +1 Date true +5 String true +5 UInt64 true +8 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 alter modify column 2 -8 None -11 String +1 Date true +5 String true +5 UInt64 true +8 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 insert after alter modify column 2 -1 Date -1 UInt64 -9 None -12 String +1 String false +1 UInt64 false +2 
Date true +5 String true +5 UInt64 true +9 None false 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 19 19 \N \N \N \N 0 20 20 20 \N 20 \N 0 21 21 str_21 str_21 \N \N 0 22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 -1 Date -1 UInt64 -9 None -12 String +1 String false +1 UInt64 false +2 Date true +5 String true +5 UInt64 true +9 None false 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 -1 Date -1 UInt64 -12 None -12 String +1 String false +1 UInt64 false +2 Date true +5 String true +5 UInt64 true +12 None false 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql index 4ab700306d4..de05ba36177 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_compact_merge_tree.sql @@ -9,7 +9,7 @@ insert into test select number, number from numbers(3); select 'alter add column 1'; alter table test add column d Dynamic(max_types=3) settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter add column 1'; @@ -17,37 +17,37 @@ insert into test select number, number, number from numbers(3, 3); insert into test select number, number, 'str_' || toString(number) from 
numbers(6, 3); insert into test select number, number, NULL from numbers(9, 3); insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'alter modify column 1'; -alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +alter table test modify column d Dynamic(max_types=0) settings mutations_sync=1; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter modify column 1'; insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'alter modify column 2'; -alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +alter table test modify column d Dynamic(max_types=2) settings mutations_sync=1; +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter modify column 2'; insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4); -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'alter modify column 3'; alter table test modify column y Dynamic settings mutations_sync=1; -select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; select 'insert after alter modify column 3'; insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3); -select count(), dynamicType(d) from 
test group by dynamicType(d) order by count(), dynamicType(d); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d); select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x; drop table test; \ No newline at end of file diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference index c592528c3cd..d7123288280 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_memory.reference @@ -24,48 +24,28 @@ insert after alter add column 1 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 alter modify column 1 +4 String +4 UInt64 7 None -8 String 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -4 UInt64 -7 String +1 Date +5 String +5 UInt64 8 None 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 @@ -83,13 +63,37 @@ alter modify column 2 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 +18 18 1970-01-19 \N \N 1970-01-19 0 +alter modify column 2 1 Date +5 String 5 UInt64 -8 String +8 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +insert after alter modify column 2 +2 Date +6 String +6 UInt64 9 None 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 @@ -107,17 +111,17 @@ insert after alter modify column 2 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 19 19 \N \N \N \N 0 20 20 20 \N 20 \N 0 21 21 str_21 str_21 \N \N 0 22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 -1 Date -5 UInt64 -8 String +2 Date +6 String +6 UInt64 9 None 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 @@ -135,17 +139,17 @@ alter modify column 3 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N 
\N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 -1 Date -5 UInt64 -8 String +2 Date +6 String +6 UInt64 12 None 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 @@ -163,9 +167,9 @@ insert after alter modify column 3 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 diff --git a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference index 2ec301b747b..d7123288280 100644 --- a/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference +++ b/tests/queries/0_stateless/03040_dynamic_type_alters_1_wide_merge_tree.reference @@ -24,147 +24,152 @@ insert after alter add column 1 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 alter modify column 1 +4 String +4 UInt64 7 None -8 String 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 insert after alter modify column 1 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -alter modify column 2 -8 None -11 String -0 0 \N \N \N \N 0 -1 1 \N \N \N \N 0 -2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 -6 6 str_6 str_6 \N \N 0 -7 7 str_7 str_7 \N \N 0 -8 8 str_8 str_8 \N \N 0 -9 9 \N \N \N \N 0 -10 10 \N \N \N \N 0 -11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 -13 13 str_13 str_13 \N \N 0 -14 14 \N \N \N \N 0 -15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 -17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 -insert after alter modify column 2 1 Date -1 UInt64 -9 None -12 String +5 String +5 UInt64 +8 None 0 0 \N \N \N \N 0 1 1 \N \N \N \N 0 2 2 \N \N \N \N 0 -3 3 3 3 \N \N 0 -4 4 4 4 \N \N 0 -5 5 5 5 \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 6 6 str_6 str_6 \N \N 0 7 7 str_7 str_7 \N \N 0 8 8 str_8 str_8 \N \N 0 9 9 \N \N \N \N 0 10 10 \N \N \N \N 0 11 11 \N \N \N \N 0 -12 12 12 12 \N \N 0 +12 12 12 \N 12 \N 0 13 13 str_13 str_13 \N \N 0 14 14 \N \N \N \N 0 15 15 \N \N \N \N 0 -16 16 16 16 \N \N 0 +16 16 16 \N 16 \N 0 17 17 str_17 str_17 \N \N 0 -18 18 1970-01-19 1970-01-19 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 +alter modify column 2 +1 Date +5 String +5 UInt64 +8 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 
18 1970-01-19 \N \N 1970-01-19 0 +insert after alter modify column 2 +2 Date +6 String +6 UInt64 +9 None +0 0 \N \N \N \N 0 +1 1 \N \N \N \N 0 +2 2 \N \N \N \N 0 +3 3 3 \N 3 \N 0 +4 4 4 \N 4 \N 0 +5 5 5 \N 5 \N 0 +6 6 str_6 str_6 \N \N 0 +7 7 str_7 str_7 \N \N 0 +8 8 str_8 str_8 \N \N 0 +9 9 \N \N \N \N 0 +10 10 \N \N \N \N 0 +11 11 \N \N \N \N 0 +12 12 12 \N 12 \N 0 +13 13 str_13 str_13 \N \N 0 +14 14 \N \N \N \N 0 +15 15 \N \N \N \N 0 +16 16 16 \N 16 \N 0 +17 17 str_17 str_17 \N \N 0 +18 18 1970-01-19 \N \N 1970-01-19 0 19 19 \N \N \N \N 0 20 20 20 \N 20 \N 0 21 21 str_21 str_21 \N \N 0 22 22 1970-01-23 \N \N 1970-01-23 0 alter modify column 3 -1 Date -1 UInt64 +2 Date +6 String +6 UInt64 9 None -12 String 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 22 22 22 \N 0 \N \N 1970-01-23 0 insert after alter modify column 3 -1 Date -1 UInt64 +2 Date +6 String +6 UInt64 12 None -12 String 0 0 0 \N 0 \N \N \N 0 1 1 1 \N 0 \N \N \N 0 2 2 2 \N 0 \N \N \N 0 -3 3 3 \N 0 3 \N \N 0 -4 4 4 \N 0 4 \N \N 0 -5 5 5 \N 0 5 \N \N 0 +3 3 3 \N 0 \N 3 \N 0 +4 4 4 \N 0 \N 4 \N 0 +5 5 5 \N 0 \N 5 \N 0 6 6 6 \N 0 str_6 \N \N 0 7 7 7 \N 0 str_7 \N \N 0 8 8 8 \N 0 str_8 \N \N 0 9 9 9 \N 0 \N \N \N 0 10 10 10 \N 0 \N \N \N 0 11 11 11 \N 0 \N \N \N 0 -12 12 12 \N 0 12 \N \N 0 +12 12 12 \N 0 \N 12 \N 0 13 13 13 \N 0 str_13 \N \N 0 14 14 14 \N 0 \N \N \N 0 15 15 15 \N 0 \N \N \N 0 -16 16 16 \N 0 16 \N \N 0 +16 16 16 \N 0 \N 16 \N 0 17 17 17 \N 0 str_17 \N \N 0 -18 18 18 \N 0 1970-01-19 \N \N 0 +18 18 18 \N 0 \N \N 1970-01-19 0 19 19 19 \N 0 \N \N \N 0 20 20 20 \N 0 \N 20 \N 0 21 21 21 \N 0 str_21 \N \N 0 diff --git a/tests/queries/0_stateless/03041_dynamic_type_check_table.sh b/tests/queries/0_stateless/03041_dynamic_type_check_table.sh index c8bd533e253..da24b892cbd 100755 --- a/tests/queries/0_stateless/03041_dynamic_type_check_table.sh +++ b/tests/queries/0_stateless/03041_dynamic_type_check_table.sh @@ -13,7 +13,7 @@ function run() $CH_CLIENT -q "insert into test select number, number from numbers(3)" echo "alter add column" - $CH_CLIENT -q "alter table test add column d Dynamic(max_types=3) settings mutations_sync=1" + $CH_CLIENT -q "alter table test add column d Dynamic(max_types=2) settings mutations_sync=1" $CH_CLIENT -q "select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d)" $CH_CLIENT -q "select x, y, d, d.String, d.UInt64, d.\`Tuple(a UInt64)\`.a from test order by x" diff --git a/tests/queries/0_stateless/03096_http_interface_role_query_param.sh b/tests/queries/0_stateless/03096_http_interface_role_query_param.sh index 5ae2d5f2f18..db42ee6bb41 100755 --- a/tests/queries/0_stateless/03096_http_interface_role_query_param.sh +++ b/tests/queries/0_stateless/03096_http_interface_role_query_param.sh @@ -21,7 +21,7 @@ CHANGED_SETTING_VALUE="42" SHOW_CURRENT_ROLES_QUERY="SELECT role_name FROM system.current_roles ORDER BY 
role_name ASC" SHOW_CHANGED_SETTINGS_QUERY="SELECT name, value FROM system.settings WHERE changed = 1 AND name = '$CHANGED_SETTING_NAME' ORDER BY name ASC" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP USER IF EXISTS $TEST_USER; DROP ROLE IF EXISTS $TEST_ROLE1; DROP ROLE IF EXISTS $TEST_ROLE2; @@ -94,7 +94,7 @@ OUT=$($CLICKHOUSE_CURL -u $TEST_USER_AUTH -sS "$CLICKHOUSE_URL&role=$TEST_ROLE1& echo -ne $OUT | grep -o "Code: 512" || echo "expected code 512, got: $OUT" echo -ne $OUT | grep -o "SET_NON_GRANTED_ROLE" || echo "expected SET_NON_GRANTED_ROLE error, got: $OUT" -$CLICKHOUSE_CLIENT -n --query " +$CLICKHOUSE_CLIENT --query " DROP USER $TEST_USER; DROP ROLE $TEST_ROLE1; DROP ROLE $TEST_ROLE2; diff --git a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql index 4e42d94fd79..b3d1827d98f 100644 --- a/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql +++ b/tests/queries/0_stateless/03130_convert_outer_join_to_inner_join.sql @@ -6,14 +6,18 @@ CREATE TABLE test_table_1 ( id UInt64, value String -) ENGINE=MergeTree ORDER BY id; +) ENGINE=MergeTree ORDER BY id +SETTINGS index_granularity = 16 # We have number of granules in the `EXPLAIN` output in reference file +; DROP TABLE IF EXISTS test_table_2; CREATE TABLE test_table_2 ( id UInt64, value String -) ENGINE=MergeTree ORDER BY id; +) ENGINE=MergeTree ORDER BY id +SETTINGS index_granularity = 16 +; INSERT INTO test_table_1 VALUES (1, 'Value_1'), (2, 'Value_2'); INSERT INTO test_table_2 VALUES (2, 'Value_2'), (3, 'Value_3'); diff --git a/tests/queries/0_stateless/03140_client_subsequent_external_tables.sh b/tests/queries/0_stateless/03140_client_subsequent_external_tables.sh index af75bf42172..64398a515f5 100755 --- a/tests/queries/0_stateless/03140_client_subsequent_external_tables.sh +++ b/tests/queries/0_stateless/03140_client_subsequent_external_tables.sh @@ -7,7 +7,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) INPUT_FILE=$CUR_DIR/$CLICKHOUSE_DATABASE.tsv echo "foo" > "$INPUT_FILE" -$CLICKHOUSE_CLIENT --external --file="$INPUT_FILE" --name=t --structure='x String' -nm -q " +$CLICKHOUSE_CLIENT --external --file="$INPUT_FILE" --name=t --structure='x String' -m -q " select * from t; select * from t; " diff --git a/tests/queries/0_stateless/03143_prewhere_profile_events.sh b/tests/queries/0_stateless/03143_prewhere_profile_events.sh index 00daa0fe7cc..6a6b993e5f8 100755 --- a/tests/queries/0_stateless/03143_prewhere_profile_events.sh +++ b/tests/queries/0_stateless/03143_prewhere_profile_events.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " DROP TABLE IF EXISTS t; CREATE TABLE t(a UInt32, b UInt32, c UInt32, d UInt32) ENGINE=MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part=0, min_rows_for_wide_part=0; @@ -25,7 +25,7 @@ client_opts=( --max_threads 8 ) -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_1" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_1" -q " SELECT * FROM t PREWHERE (b % 8192) = 42 @@ -33,7 +33,7 @@ PREWHERE (b % 8192) = 42 FORMAT Null " -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_2" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_2" -q " SELECT * FROM t PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 @@ -42,7 +42,7 @@ PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 settings enable_multiple_prewhere_read_steps=1; " -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_3" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_3" -q " SELECT * FROM t PREWHERE (b % 8192) = 42 AND (c % 16384) = 42 @@ -51,7 +51,7 @@ PREWHERE (b % 8192) = 42 AND (c % 16384) = 42 settings enable_multiple_prewhere_read_steps=0; " -${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_4" -nq " +${CLICKHOUSE_CLIENT} "${client_opts[@]}" --query_id "$query_id_4" -q " SELECT b, c FROM t PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 @@ -59,7 +59,7 @@ PREWHERE (b % 8192) = 42 AND (c % 8192) = 42 settings enable_multiple_prewhere_read_steps=1; " -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " SYSTEM FLUSH LOGS; -- 52503 which is 43 * number of granules, 10000000 diff --git a/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh b/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh index 95aef9bbc5b..4e7b318e202 100755 --- a/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh +++ b/tests/queries/0_stateless/03145_non_loaded_projection_backup.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " drop table if exists tp_1; create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y partition by intDiv(y, 100) settings max_parts_to_merge_at_once=1; insert into tp_1 select number, number from numbers(3); @@ -25,7 +25,7 @@ alter table tp_1 drop projection pp; alter table tp_1 attach partition '0'; " -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; check table tp_1 settings check_query_single_value_result = 0;" | grep -o "Found unexpected projection directories: pp.proj" @@ -34,19 +34,19 @@ $CLICKHOUSE_CLIENT -q " backup table tp_1 to Disk('backups', '$backup_id'); " | grep -o "BACKUP_CREATED" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; drop table tp_1; restore table tp_1 from Disk('backups', '$backup_id'); " | grep -o "RESTORED" $CLICKHOUSE_CLIENT -q "select count() from tp_1;" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; check table tp_1 settings check_query_single_value_result = 0;" | grep -o "Found unexpected projection directories: pp.proj" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; check table tp_1" -$CLICKHOUSE_CLIENT -nm -q " +$CLICKHOUSE_CLIENT -m -q " set send_logs_level='fatal'; drop table tp_1" diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference index 0b76d30953e..370e6352657 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.reference @@ -1,35 +1,35 @@ 1 2024-01-01 Date 2 1704056400 Decimal(18, 3) -3 1 String -4 2 String +3 1 Float32 +4 2 Float64 1 2024-01-01 Date 1 2024-01-01 Date 2 1704056400 Decimal(18, 3) 2 1704056400 Decimal(18, 3) -3 1 String -3 1 String -4 2 String -4 2 String - -1 2024-01-01 String -1 2024-01-01 String -2 1704056400 String -2 1704056400 String -3 1 String -3 1 String -4 2 String -4 2 String +3 1 Float32 +3 1 Float32 +4 2 Float64 +4 2 Float64 1 2024-01-01 Date -1 2024-01-01 String -1 2024-01-01 String +1 2024-01-01 Date 2 1704056400 Decimal(18, 3) -2 1704056400 String -2 1704056400 String -3 1 String -3 1 String -3 1 String -4 2 String -4 2 String -4 2 String +2 1704056400 Decimal(18, 3) +3 1 Float32 +3 1 Float32 +4 2 Float64 +4 2 Float64 + +1 2024-01-01 Date +1 2024-01-01 Date +1 2024-01-01 Date +2 1704056400 Decimal(18, 3) +2 1704056400 Decimal(18, 3) +2 1704056400 Decimal(18, 3) +3 1 Float32 +3 1 Float32 +3 1 Float32 +4 2 Float64 +4 2 Float64 +4 2 Float64 diff --git a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql index ad5ea9512c6..71d5dd4abd1 100644 --- a/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql +++ b/tests/queries/0_stateless/03150_dynamic_type_mv_insert.sql @@ -1,5 +1,6 @@ SET allow_experimental_dynamic_type=1; +DROP TABLE IF EXISTS null_table; CREATE TABLE null_table ( n1 UInt8, @@ -7,9 +8,11 @@ CREATE TABLE null_table ) ENGINE = Null; +DROP VIEW IF EXISTS dummy_rmv; CREATE MATERIALIZED VIEW dummy_rmv TO to_table AS SELECT * FROM null_table; +DROP TABLE IF EXISTS to_table; CREATE TABLE to_table ( n1 UInt8, @@ -32,3 +35,7 @@ select ''; ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=10); INSERT INTO null_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 
3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; + +DROP TABLE null_table; +DROP VIEW dummy_rmv; +DROP TABLE to_table; diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference index d96fbf658d8..2d3b2f118f6 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.reference @@ -1,26 +1,26 @@ -1 2024-01-01 Date -2 1704056400 String -3 1 String -4 2 String +1 2024-01-01 Date false +2 1704056400 Decimal(18, 3) false +3 1 Float32 true +4 2 Float64 true -1 2024-01-01 Date -1 2024-01-01 Date -2 1704056400 Decimal(18, 3) -2 1704056400 String -3 1 Float32 -3 1 String -4 2 Float64 -4 2 String +1 2024-01-01 Date false +1 2024-01-01 Date false +2 1704056400 Decimal(18, 3) false +2 1704056400 Decimal(18, 3) false +3 1 Float32 false +3 1 Float32 false +4 2 Float64 false +4 2 Float64 false -1 2024-01-01 String -1 2024-01-01 String -1 2024-01-01 String -2 1704056400 String -2 1704056400 String -2 1704056400 String -3 1 String -3 1 String -3 1 String -4 2 String -4 2 String -4 2 String +1 2024-01-01 Date true +1 2024-01-01 Date true +1 2024-01-01 Date true +2 1704056400 Decimal(18, 3) true +2 1704056400 Decimal(18, 3) true +2 1704056400 Decimal(18, 3) true +3 1 Float32 true +3 1 Float32 true +3 1 Float32 true +4 2 Float64 true +4 2 Float64 true +4 2 Float64 true diff --git a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql index 632f3504fdb..e476d34a1db 100644 --- a/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql +++ b/tests/queries/0_stateless/03151_dynamic_type_scale_max_types.sql @@ -11,16 +11,16 @@ CREATE TABLE to_table ENGINE = MergeTree ORDER BY n1; INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); -SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; select ''; ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=5); INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); -SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; select ''; -ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=1); +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=0); INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); -SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=500); -- { serverError UNEXPECTED_AST_STRUCTURE } diff --git a/tests/queries/0_stateless/03152_dynamic_type_simple.reference b/tests/queries/0_stateless/03152_dynamic_type_simple.reference index 5f243209ff3..e508bdd1990 100644 --- a/tests/queries/0_stateless/03152_dynamic_type_simple.reference +++ b/tests/queries/0_stateless/03152_dynamic_type_simple.reference @@ -7,7 +7,7 @@ string2 String \N None 42 Int64 42 Int64 string String -string String [1, 2] String +string String [1,2] 
Array(Int64) [1,2] Array(Int64) \N None ┌─d────────────────────────┬─dynamicType(d)─┬─d.Int64─┬─d.String─┬─────d.Date─┬─d.Float64─┬──────────d.DateTime─┬─d.Array(Int64)─┬─d.Array(String)──────────┐ 1. │ 42 │ Int64 │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ [] │ [] │ diff --git a/tests/queries/0_stateless/03152_dynamic_type_simple.sql b/tests/queries/0_stateless/03152_dynamic_type_simple.sql index fd5328faf15..ed24b213b1c 100644 --- a/tests/queries/0_stateless/03152_dynamic_type_simple.sql +++ b/tests/queries/0_stateless/03152_dynamic_type_simple.sql @@ -1,14 +1,17 @@ SET allow_experimental_dynamic_type=1; +DROP TABLE IF EXISTS test_max_types; CREATE TABLE test_max_types (d Dynamic(max_types=5)) ENGINE = Memory; INSERT INTO test_max_types VALUES ('string1'), (42), (3.14), ([1, 2]), (toDate('2021-01-01')), ('string2'); SELECT d, dynamicType(d) FROM test_max_types; SELECT ''; +DROP TABLE IF EXISTS test_nested_dynamic; CREATE TABLE test_nested_dynamic (d1 Dynamic, d2 Dynamic(max_types=2)) ENGINE = Memory; INSERT INTO test_nested_dynamic VALUES (NULL, 42), (42, 'string'), ('string', [1, 2]), ([1, 2], NULL); SELECT d1, dynamicType(d1), d2, dynamicType(d2) FROM test_nested_dynamic; +DROP TABLE IF EXISTS test_rapid_schema; CREATE TABLE test_rapid_schema (d Dynamic) ENGINE = Memory; INSERT INTO test_rapid_schema VALUES (42), ('string1'), (toDate('2021-01-01')), ([1, 2, 3]), (3.14), ('string2'), (toDateTime('2021-01-01 12:00:00')), (['array', 'of', 'strings']), (NULL), (toFloat64(42.42)); @@ -27,3 +30,8 @@ FROM FROM numbers(10000) ) ); + +DROP TABLE test_max_types; +DROP TABLE test_nested_dynamic; +DROP TABLE test_rapid_schema; + diff --git a/tests/queries/0_stateless/03153_dynamic_type_empty.sql b/tests/queries/0_stateless/03153_dynamic_type_empty.sql index 8e942fe6f6e..3a0c98e63ee 100644 --- a/tests/queries/0_stateless/03153_dynamic_type_empty.sql +++ b/tests/queries/0_stateless/03153_dynamic_type_empty.sql @@ -1,5 +1,7 @@ SET allow_experimental_dynamic_type=1; +DROP TABLE IF EXISTS test_null_empty; CREATE TABLE test_null_empty (d Dynamic) ENGINE = Memory; INSERT INTO test_null_empty VALUES ([]), ([1]), ([]), (['1']), ([]), (()),((1)), (()), (('1')), (()), ({}), ({1:2}), ({}), ({'1':'2'}), ({}); SELECT d, dynamicType(d) FROM test_null_empty; +DROP TABLE test_null_empty; diff --git a/tests/queries/0_stateless/03155_test_move_to_prewhere.sh b/tests/queries/0_stateless/03155_test_move_to_prewhere.sh index b6980b3a23a..f33a6b3ef27 100755 --- a/tests/queries/0_stateless/03155_test_move_to_prewhere.sh +++ b/tests/queries/0_stateless/03155_test_move_to_prewhere.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE event_envoy ( timestamp_interval DateTime CODEC(DoubleDelta), @@ -18,7 +18,7 @@ ${CLICKHOUSE_CLIENT} -nq " INSERT INTO event_envoy SELECT now() - number, 'us-east-1', 'ch_super_fast' FROM numbers_mt(1e5); " -${CLICKHOUSE_CLIENT} -nq " +${CLICKHOUSE_CLIENT} -q " CREATE TABLE event_envoy_remote ( timestamp_interval DateTime CODEC(DoubleDelta), diff --git a/tests/queries/0_stateless/03156_default_multiquery_split.sh b/tests/queries/0_stateless/03156_default_multiquery_split.sh index 8ba2f46b786..d849fb5a162 100755 --- a/tests/queries/0_stateless/03156_default_multiquery_split.sh +++ b/tests/queries/0_stateless/03156_default_multiquery_split.sh @@ -53,6 +53,6 @@ SELECT * FROM TEST2 ORDER BY value; DROP TABLE TEST1; DROP TABLE TEST2; EOF -$CLICKHOUSE_CLIENT -m -n < "$SQL_FILE_NAME" +$CLICKHOUSE_CLIENT -m < "$SQL_FILE_NAME" rm "$SQL_FILE_NAME" diff --git a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql index d302205ca23..fffea1bd0f5 100644 --- a/tests/queries/0_stateless/03159_dynamic_type_all_types.sql +++ b/tests/queries/0_stateless/03159_dynamic_type_all_types.sql @@ -6,7 +6,7 @@ SET allow_experimental_variant_type=1; SET allow_suspicious_low_cardinality_types=1; -CREATE TABLE t (d Dynamic(max_types=255)) ENGINE = Memory; +CREATE TABLE t (d Dynamic(max_types=254)) ENGINE = Memory; -- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); @@ -84,7 +84,7 @@ INSERT INTO t VALUES ([(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, [ SELECT dynamicType(d), d FROM t ORDER BY substring(dynamicType(d),1,1), length(dynamicType(d)), d; -CREATE TABLE t2 (d Dynamic(max_types=255)) ENGINE = Memory; +CREATE TABLE t2 (d Dynamic(max_types=254)) ENGINE = Memory; INSERT INTO t2 SELECT * FROM t; SELECT ''; diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference index c5a6cbab0bc..8edf541c2a0 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.reference +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.reference @@ -1,2 +1,90 @@ -1231 John 33 +compact part +testing throw default mode +-- { echoOn } + +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +SELECT 'testing drop mode'; +testing drop mode +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users_compact WHERE uid = 1231; +SELECT * FROM users_compact ORDER BY uid; +SYSTEM FLUSH LOGS; +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +all_1_1_0_2 +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +SELECT 'testing rebuild mode'; +testing rebuild mode +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +ALTER TABLE users_compact MODIFY SETTING 
lightweight_mutation_projection_mode = 'rebuild'; +DELETE FROM users_compact WHERE uid = 6666; +SELECT * FROM users_compact ORDER BY uid; 8888 Alice 50 +SYSTEM FLUSH LOGS; +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); +all_1_1_0_4 +all_3_3_0_4 +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; +p1 all_3_3_0_4 +p2 all_3_3_0_4 +wide part +testing throw default mode +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } +SELECT 'testing drop mode'; +testing drop mode +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; +DELETE FROM users_wide WHERE uid = 1231; +SELECT * FROM users_wide ORDER BY uid; +SYSTEM FLUSH LOGS; +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +all_1_1_0_2 +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +SELECT 'testing rebuild mode'; +testing rebuild mode +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; +DELETE FROM users_wide WHERE uid = 6666; +SELECT * FROM users_wide ORDER BY uid; +8888 Alice 50 +SYSTEM FLUSH LOGS; +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); +all_1_1_0_4 +all_3_3_0_4 +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; +p1 all_3_3_0_4 +p2 all_3_3_0_4 diff --git a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql index b189388e356..da6427cbf22 100644 --- a/tests/queries/0_stateless/03161_lightweight_delete_projection.sql +++ b/tests/queries/0_stateless/03161_lightweight_delete_projection.sql @@ -1,31 +1,145 @@ +-- For cloud version, should also consider min_bytes_for_full_part_storage since packed storage exists, +-- but for less redundancy, just let CI test the parameter. 
-DROP TABLE IF EXISTS users; +SET lightweight_deletes_sync = 2, alter_sync = 2; -CREATE TABLE users ( +DROP TABLE IF EXISTS users_compact; + + +SELECT 'compact part'; + +CREATE TABLE users_compact ( uid Int16, name String, age Int16, projection p1 (select count(), age group by age), projection p2 (select age, name group by age, name) -) ENGINE = MergeTree order by uid; +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 10485760; -INSERT INTO users VALUES (1231, 'John', 33); -INSERT INTO users VALUES (6666, 'Ksenia', 48); -INSERT INTO users VALUES (8888, 'Alice', 50); +INSERT INTO users_compact VALUES (1231, 'John', 33); -DELETE FROM users WHERE 1; -- { serverError NOT_IMPLEMENTED } +SELECT 'testing throw default mode'; -DELETE FROM users WHERE uid = 8888 SETTINGS lightweight_mutation_projection_mode = 'throw'; -- { serverError NOT_IMPLEMENTED } +-- { echoOn } -DELETE FROM users WHERE uid = 6666 SETTINGS lightweight_mutation_projection_mode = 'drop'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } + +SELECT 'testing drop mode'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users_compact WHERE uid = 1231; + +SELECT * FROM users_compact ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -- expecting no projection SELECT - name, - `table` + name, parent_name FROM system.projection_parts -WHERE (database = currentDatabase()) AND (`table` = 'users'); +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); -SELECT * FROM users ORDER BY uid; +SELECT 'testing rebuild mode'; +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); -DROP TABLE users; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users_compact WHERE uid = 6666; + +SELECT * FROM users_compact ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; + +-- { echoOff } + +DROP TABLE users_compact; + + +SELECT 'wide part'; +CREATE TABLE users_wide ( + uid Int16, + name String, + age Int16, + projection p1 (select count(), age group by age), + projection p2 (select age, name group by age, name) +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO users_wide VALUES (1231, 'John', 33); + +SELECT 'testing throw default mode'; + +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } + +SELECT 'testing drop mode'; +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users_wide WHERE uid = 1231; + +SELECT * FROM users_wide ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +-- expecting no projection +SELECT + name, parent_name +FROM 
system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +SELECT 'testing rebuild mode'; +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users_wide WHERE uid = 6666; + +SELECT * FROM users_wide ORDER BY uid; + +SYSTEM FLUSH LOGS; + +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; + +-- { echoOff } + +DROP TABLE users_wide; diff --git a/tests/queries/0_stateless/03169_time_virtual_column.sh b/tests/queries/0_stateless/03169_time_virtual_column.sh index fef1de8c6f2..b289f39accb 100755 --- a/tests/queries/0_stateless/03169_time_virtual_column.sh +++ b/tests/queries/0_stateless/03169_time_virtual_column.sh @@ -6,7 +6,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) echo "1,2" > $CLICKHOUSE_TEST_UNIQUE_NAME.csv sleep 1 -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " select _size, (dateDiff('millisecond', _time, now()) < 600000 AND dateDiff('millisecond', _time, now()) > 0) from file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv'); " rm $CLICKHOUSE_TEST_UNIQUE_NAME.csv diff --git a/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh b/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh index 9b57e5c8718..b9bab2bd70b 100755 --- a/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh +++ b/tests/queries/0_stateless/03172_dynamic_binary_serialization.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh $CLICKHOUSE_CLIENT -q "drop table if exists test" -$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 -q "create table test (id UInt64, d Dynamic(max_types=255)) engine=Memory" +$CLICKHOUSE_CLIENT --allow_experimental_dynamic_type=1 -q "create table test (id UInt64, d Dynamic(max_types=254)) engine=Memory" $CLICKHOUSE_CLIENT -q "insert into test select 0, NULL" $CLICKHOUSE_CLIENT -q "insert into test select 1, materialize(42)::UInt8" @@ -58,6 +58,6 @@ $CLICKHOUSE_CLIENT -q "insert into test select 47, materialize([[(20, 20), (50, $CLICKHOUSE_CLIENT -q "insert into test select 48, materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]])::MultiPolygon" $CLICKHOUSE_CLIENT -q "insert into test select 49, materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])" -$CLICKHOUSE_CLIENT -q "select * from test format RowBinary" | $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --input-format RowBinary --structure 'id UInt64, d Dynamic(max_types=255)' -q "select d, dynamicType(d) from table order by id" +$CLICKHOUSE_CLIENT -q "select * from test format RowBinary" | $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --input-format RowBinary --structure 'id UInt64, d Dynamic(max_types=254)' -q "select d, dynamicType(d) from table order by id" $CLICKHOUSE_CLIENT -q "drop table test" diff --git a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh index 7c567c0f58f..af702569794 100755 --- a/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh +++ b/tests/queries/0_stateless/03173_parallel_replicas_join_bug.sh @@ -5,7 +5,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -nq " +$CLICKHOUSE_CLIENT -q " CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple(); INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever'); diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference index 1ba147f9627..7de0804e0f2 100644 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.reference @@ -110,5 +110,5 @@ true Bool [{42:(1,[(2,{1:2})])}] Dynamic [{42:(1,[(2,{1:2})])}] Dynamic(max_types=10) [{42:(1,[(2,{1:2})])}] Dynamic(max_types=10) -[{42:(1,[(2,{1:2})])}] Dynamic(max_types=255) -[{42:(1,[(2,{1:2})])}] Dynamic(max_types=255) +[{42:(1,[(2,{1:2})])}] Dynamic(max_types=254) +[{42:(1,[(2,{1:2})])}] Dynamic(max_types=254) diff --git a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh index 0c585d36348..1e674a29072 100755 --- a/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh +++ b/tests/queries/0_stateless/03173_row_binary_and_native_with_binary_encoded_types.sh @@ -6,8 +6,8 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) function test { - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" - $CLICKHOUSE_LOCAL --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --stacktrace --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_binary_encode_types_in_binary_format=1 -q "select $1 as value format RowBinaryWithNamesAndTypes" | $CLICKHOUSE_LOCAL --input-format RowBinaryWithNamesAndTypes --input_format_binary_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" + $CLICKHOUSE_LOCAL --stacktrace --allow_experimental_dynamic_type=1 --allow_experimental_variant_type=1 --output_format_native_encode_types_in_binary_format=1 -q "select $1 as value format Native" | $CLICKHOUSE_LOCAL --input-format Native --input_format_native_decode_types_in_binary_format=1 -q "select value, toTypeName(value) from table" } test "materialize(42)::UInt8" @@ -66,4 +66,4 @@ test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])" test "materialize(42::UInt32)::Variant(UInt32, String, Tuple(a UInt32, b Array(Map(String, String))))" test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic" test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic(max_types=10)" -test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic(max_types=255)" +test "materialize([map(42, tuple(1, [tuple(2, map(1, 2))]))])::Dynamic(max_types=254)" diff --git a/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh 
b/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh index ce53f467823..583257d8fd3 100755 --- a/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh +++ b/tests/queries/0_stateless/03198_settings_in_csv_tsv_schema_cache.sh @@ -5,25 +5,25 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh echo -e 'a,b,c\n1,2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_skip_first_lines=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_skip_first_lines=0; SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%skip_first_lines%';" echo -e 'a,b,c\n"1",2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_numbers_from_strings=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_numbers_from_strings=0; SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%try_infer_numbers_from_strings%';" echo -e 'a,b,c\n"(1,2,3)",2,3' > $CLICKHOUSE_TEST_UNIQUE_NAME.csv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_strings_from_quoted_tuples=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.csv') SETTINGS input_format_csv_try_infer_strings_from_quoted_tuples=0; SELECT count() from system.schema_inference_cache where format = 'CSV' and additional_format_info like '%try_infer_strings_from_quoted_tuples%';" echo -e 'a\tb\tc\n1\t2\t3' > $CLICKHOUSE_TEST_UNIQUE_NAME.tsv -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.tsv') SETTINGS input_format_tsv_skip_first_lines=1; DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.tsv') SETTINGS input_format_tsv_skip_first_lines=0; SELECT count() from system.schema_inference_cache where format = 'TSV' and additional_format_info like '%skip_first_lines%';" diff --git a/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh b/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh index 4f217935123..c759cc34425 100755 --- a/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh +++ b/tests/queries/0_stateless/03198_unload_primary_key_outdated.sh @@ -4,7 +4,7 @@ CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) # shellcheck source=../shell_config.sh . 
"$CURDIR"/../shell_config.sh -$CLICKHOUSE_CLIENT -n " +$CLICKHOUSE_CLIENT " DROP TABLE IF EXISTS t_unload_primary_key; CREATE TABLE t_unload_primary_key (a UInt64, b UInt64) @@ -26,7 +26,7 @@ for _ in {1..100}; do sleep 0.3 done -$CLICKHOUSE_CLIENT -n " +$CLICKHOUSE_CLIENT " SELECT name, active, primary_key_bytes_in_memory FROM system.parts WHERE database = '$CLICKHOUSE_DATABASE' AND table = 't_unload_primary_key' ORDER BY name; DROP TABLE IF EXISTS t_unload_primary_key; " diff --git a/tests/queries/0_stateless/03199_dictionary_table_access.sh b/tests/queries/0_stateless/03199_dictionary_table_access.sh index 952b466b5da..14f017c7fbc 100755 --- a/tests/queries/0_stateless/03199_dictionary_table_access.sh +++ b/tests/queries/0_stateless/03199_dictionary_table_access.sh @@ -8,7 +8,7 @@ username="user_${CLICKHOUSE_TEST_UNIQUE_NAME}" dictname="dict_${CLICKHOUSE_TEST_UNIQUE_NAME}" dicttablename="dict_table_${CLICKHOUSE_TEST_UNIQUE_NAME}" -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " CREATE DICTIONARY IF NOT EXISTS ${dictname} ( id UInt64, @@ -26,15 +26,15 @@ ${CLICKHOUSE_CLIENT} -nm --query " SELECT * FROM ${dicttablename}; " -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT * FROM ${dictname}; " 2>&1 | grep -o ACCESS_DENIED | uniq -$CLICKHOUSE_CLIENT -nm --user="${username}" --query " +$CLICKHOUSE_CLIENT -m --user="${username}" --query " SELECT * FROM ${dicttablename}; " 2>&1 | grep -o ACCESS_DENIED | uniq -${CLICKHOUSE_CLIENT} -nm --query " +${CLICKHOUSE_CLIENT} -m --query " DROP TABLE IF EXISTS ${dicttablename} SYNC; DROP DICTIONARY IF EXISTS ${dictname}; DROP USER IF EXISTS ${username}; diff --git a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference index 6d2c1334d6e..8d2470dea44 100644 --- a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference +++ b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.reference @@ -1,10 +1,10 @@ -\N -\N -\N -\N -\N -\N -\N -\N -\N -\N +0 +1 +2 +3 +4 +0 +1 +2 +3 +4 diff --git a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql index 25f3bb0f4c8..939b49e1599 100644 --- a/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql +++ b/tests/queries/0_stateless/03200_memory_engine_alter_dynamic.sql @@ -1,6 +1,8 @@ set allow_experimental_dynamic_type=1; +drop table if exists test; create table test (d Dynamic) engine=Memory; insert into table test select * from numbers(5); -alter table test modify column d Dynamic(max_types=1); +alter table test modify column d Dynamic(max_types=0); select d.UInt64 from test settings enable_analyzer=1; select d.UInt64 from test settings enable_analyzer=0; +drop table test; diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference new file mode 100644 index 00000000000..6ed281c757a --- /dev/null +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.reference @@ -0,0 +1,2 @@ +1 +1 diff --git a/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql new file mode 100644 index 00000000000..bb3269da597 --- /dev/null +++ b/tests/queries/0_stateless/03203_count_with_non_deterministic_function.sql @@ -0,0 +1,4 @@ +CREATE 
TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x; +INSERT INTO t SELECT 0, number FROM numbers(10) SETTINGS max_block_size = 100; +SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 0; +SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS allow_experimental_analyzer = 1; diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.reference b/tests/queries/0_stateless/03203_hive_style_partitioning.reference new file mode 100644 index 00000000000..a4a2e48e046 --- /dev/null +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.reference @@ -0,0 +1,120 @@ +TESTING THE FILE HIVE PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +42 2020-01-01 +[1,2,3] 42.42 +Array(Int64) LowCardinality(Float64) +101 +2070 +4081 +2070 +2070 +1 +1 +TESTING THE URL PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +1 +TESTING THE S3 PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +Elizabeth Gordon Elizabeth Gordon +Elizabeth Gordon Elizabeth +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth +OK +TESTING THE S3CLUSTER PARTITIONING +first last Elizabeth +Jorge Frank Elizabeth +Hunter Moreno Elizabeth +Esther Guzman Elizabeth +Dennis Stephens Elizabeth +Nettie Franklin Elizabeth +Stanley Gibson Elizabeth +Eugenia Greer Elizabeth +Jeffery Delgado Elizabeth +Clara Cross Elizabeth +Elizabeth Gordon Elizabeth 
+Eva Schmidt Elizabeth Schmidt +Samuel Schmidt Elizabeth Schmidt +Eva Schmidt Elizabeth +Samuel Schmidt Elizabeth diff --git a/tests/queries/0_stateless/03203_hive_style_partitioning.sh b/tests/queries/0_stateless/03203_hive_style_partitioning.sh new file mode 100755 index 00000000000..db1f073d736 --- /dev/null +++ b/tests/queries/0_stateless/03203_hive_style_partitioning.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# Tags: no-fasttest + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE FILE HIVE PARTITIONING'" + + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _non_existing_column FROM file('$CURDIR/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=*/sample.parquet') WHERE column0 = _column0; + +SELECT _number, _date FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') LIMIT 1; +SELECT _array, _float FROM file('$CURDIR/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet') LIMIT 1; +SELECT toTypeName(_array), toTypeName(_float) FROM file('$CURDIR/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet') LIMIT 1; +SELECT count(*) FROM file('$CURDIR/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet') WHERE _number = 42; +""" + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 1; + +SELECT _identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2; +SELECT __identifier FROM file('$CURDIR/data_hive/partitioning/identifier=*/email.csv') LIMIT 2; +""" + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM file('$CURDIR/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "INCORRECT_DATA" + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 0; + +SELECT *, _column0 FROM 
file('$CURDIR/data_hive/partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" + + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE URL PARTITIONING'" + + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _non_existing_column FROM url('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10;""" + +$CLICKHOUSE_LOCAL -n -q """ +set use_hive_partitioning = 0; + +SELECT *, _column0 FROM url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -c "UNKNOWN_IDENTIFIER" + + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3 PARTITIONING'" + + +$CLICKHOUSE_CLIENT -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM 
s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; + +SELECT *, _non_existing_column FROM s3('http://localhost:11111/test/hive_partitioning/non_existing_column=Elizabeth/sample.parquet') LIMIT 10; +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=*/sample.parquet') WHERE column0 = _column0; +""" + +$CLICKHOUSE_CLIENT -n -q """ +set use_hive_partitioning = 0; + +SELECT *, _column0 FROM s3('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; +""" 2>&1 | grep -F -q "UNKNOWN_IDENTIFIER" && echo "OK" || echo "FAIL"; + +$CLICKHOUSE_LOCAL -q "SELECT 'TESTING THE S3CLUSTER PARTITIONING'" + +$CLICKHOUSE_CLIENT -n -q """ +set use_hive_partitioning = 1; + +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') LIMIT 10; + +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') WHERE column0 = _column0; + +SELECT *, _column0, _column1 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column1 = _column1; + +SELECT *, _column0, _column1 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +SELECT *, _column0 FROM s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet') WHERE column0 = _column0 AND column1 = _column1; +""" diff --git a/tests/queries/0_stateless/03205_system_sync_replica_format.reference b/tests/queries/0_stateless/03205_system_sync_replica_format.reference new file mode 100644 index 00000000000..aad51dd90b0 --- /dev/null +++ b/tests/queries/0_stateless/03205_system_sync_replica_format.reference @@ -0,0 +1 @@ +SYSTEM SYNC REPLICA db.`table` LIGHTWEIGHT diff --git a/tests/queries/0_stateless/03205_system_sync_replica_format.sql b/tests/queries/0_stateless/03205_system_sync_replica_format.sql new file mode 100644 index 00000000000..329bce80afc --- /dev/null +++ b/tests/queries/0_stateless/03205_system_sync_replica_format.sql @@ -0,0 +1 @@ +SELECT formatQuery('SYSTEM SYNC REPLICA db.table LIGHTWEIGHT'); diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.reference b/tests/queries/0_stateless/03210_dynamic_squashing.reference new file mode 100644 index 00000000000..1c23c22f550 --- /dev/null +++ b/tests/queries/0_stateless/03210_dynamic_squashing.reference @@ -0,0 +1,12 @@ +1 +Array(UInt8) true +None false +UInt64 false +2 +Array(UInt8) true +None false +UInt64 false +3 +Array(UInt8) true +String 
false +UInt64 true diff --git a/tests/queries/0_stateless/03210_dynamic_squashing.sql b/tests/queries/0_stateless/03210_dynamic_squashing.sql new file mode 100644 index 00000000000..71d09263fda --- /dev/null +++ b/tests/queries/0_stateless/03210_dynamic_squashing.sql @@ -0,0 +1,25 @@ +-- Tags: long + +set allow_experimental_dynamic_type = 1; +set max_block_size = 1000; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); +select '1'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; + +drop table test; +create table test (d Dynamic(max_types=1)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); +select '2'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; + +truncate table test; +insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); +select '3'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; + +drop table test; + diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference index d4734a85e72..4ecf7f56b07 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.reference @@ -38,3 +38,19 @@ 7 8 9 +15 \N 3 15 15 15 15 +14 \N 2 10 10 10 154 +13 \N 2 10 10 10 143 +12 \N 2 10 10 10 14 +11 \N 2 10 10 10 12 +10 \N 2 10 10 10 10 +9 \N 1 5 5 5 99 +8 \N 1 5 5 5 88 +7 \N 1 5 5 5 9 +6 \N 1 5 5 5 7 +5 \N 1 5 5 5 5 +4 \N 0 0 0 0 44 +3 \N 0 0 0 0 33 +2 \N 0 0 0 0 4 +1 \N 0 0 0 0 2 +0 \N 0 0 0 0 0 diff --git a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql index f6017ee6690..cc6746e428f 100644 --- a/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql +++ b/tests/queries/0_stateless/03210_lag_lead_inframe_types.sql @@ -2,3 +2,23 @@ SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (OR SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT lagInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); SELECT leadInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); + +SELECT + number, + YYYYMMDDToDate(1, toLowCardinality(11), max(YYYYMMDDToDate(YYYYMMDDToDate(toLowCardinality(1), 11, materialize(NULL), 19700101.1, 1, 27, 7, materialize(toUInt256(37)), 9, 19, 9), 1, toUInt128(11), NULL, 19700101.1, 1, 27, 7, 37, 9, 19, 9), toUInt256(30)) IGNORE NULLS OVER w, NULL, 19700101.1, toNullable(1), 27, materialize(7), 37, 9, 19, 9), + p, + pp, + lagInFrame(number, number - pp) OVER w AS lag2, + lagInFrame(number, number - pp, number * 11) OVER w AS lag, + leadInFrame(number, number - pp, number * 11) OVER w AS lead +FROM +( + SELECT + number, + intDiv(number, 5) AS p, + p * 5 AS pp + FROM numbers(16) +) +WHERE toLowCardinality(1) +WINDOW w AS (PARTITION BY p 
ORDER BY number ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY number DESC NULLS LAST; diff --git a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh index 8a77538f592..adbb0cb6de0 100755 --- a/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh +++ b/tests/queries/0_stateless/03212_max_bytes_to_read_for_schema_inference_in_cache.sh @@ -5,7 +5,7 @@ CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) . "$CUR_DIR"/../shell_config.sh echo '{"x" : 42}' > $CLICKHOUSE_TEST_UNIQUE_NAME.json -$CLICKHOUSE_LOCAL -nm -q " +$CLICKHOUSE_LOCAL -m -q " DESC file('$CLICKHOUSE_TEST_UNIQUE_NAME.json') SETTINGS input_format_max_bytes_to_read_for_schema_inference=1000; SELECT additional_format_info from system.schema_inference_cache" diff --git a/tests/queries/0_stateless/03215_grant_current_grants.reference b/tests/queries/0_stateless/03215_grant_current_grants.reference new file mode 100644 index 00000000000..e4f6850b806 --- /dev/null +++ b/tests/queries/0_stateless/03215_grant_current_grants.reference @@ -0,0 +1,2 @@ +GRANT SELECT, CREATE TABLE, CREATE VIEW ON default.* +GRANT SELECT ON default.* diff --git a/tests/queries/0_stateless/03215_grant_current_grants.sh b/tests/queries/0_stateless/03215_grant_current_grants.sh new file mode 100755 index 00000000000..68af4a62bba --- /dev/null +++ b/tests/queries/0_stateless/03215_grant_current_grants.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +CURDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) +# shellcheck source=../shell_config.sh +. "$CURDIR"/../shell_config.sh + + +user1="user03215_1_${CLICKHOUSE_DATABASE}_$RANDOM" +user2="user03215_2_${CLICKHOUSE_DATABASE}_$RANDOM" +user3="user03215_3_${CLICKHOUSE_DATABASE}_$RANDOM" +db=${CLICKHOUSE_DATABASE} + + +${CLICKHOUSE_CLIENT} --query "CREATE USER $user1, $user2, $user3;"; +${CLICKHOUSE_CLIENT} --query "GRANT SELECT, CREATE TABLE, CREATE VIEW ON $db.* TO $user1 WITH GRANT OPTION;"; + +${CLICKHOUSE_CLIENT} --query "GRANT CURRENT GRANTS ON $db.* TO $user2" --user $user1; +${CLICKHOUSE_CLIENT} --query "GRANT CURRENT GRANTS ON $db.* TO $user3" --user $user2; + +${CLICKHOUSE_CLIENT} --query "SHOW GRANTS FOR $user2" | sed 's/ TO.*//'; +${CLICKHOUSE_CLIENT} --query "SHOW GRANTS FOR $user3" | sed 's/ TO.*//'; + +${CLICKHOUSE_CLIENT} --query "GRANT CURRENT GRANTS(SELECT ON $db.*) TO $user3" --user $user1; +${CLICKHOUSE_CLIENT} --query "SHOW GRANTS FOR $user3" | sed 's/ TO.*//'; + +${CLICKHOUSE_CLIENT} --query "DROP USER IF EXISTS $user1, $user2, $user3"; diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.reference b/tests/queries/0_stateless/03215_multilinestring_geometry.reference new file mode 100644 index 00000000000..9702dd6d6f8 --- /dev/null +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.reference @@ -0,0 +1,30 @@ +-- { echoOn } +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +[[(1,1),(2,2),(3,3),(1,1)]] +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +MultiLineString +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +MULTILINESTRING((1 1,2 2,3 3,1 1)) +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +[[(1,1),(2,2),(3,3),(1,1)],[(1,0),(2,0),(3,0)]] +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +MultiLineString 
+SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +MULTILINESTRING((1 1,2 2,3 3,1 1),(1 0,2 0,3 0)) +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. +WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x +SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); +POLYGON((1 1,2 2,3 3,1 1)) String [[(1,1),(2,2),(3,3),(1,1)]] Polygon +-- Non constant tests + +DROP TABLE IF EXISTS t; +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String, ord Float64) Engine = Memory; +INSERT INTO t (ord, shape, wkt_string) VALUES (1, [[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (2, [[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (3, [[(1, 0), (2, 1), (3, 0), (4, 1), (5, 0), (6, 1), (7, 0), (8, 1), (9, 0), (10, 1)]], 'MULTILINESTRING ((1 0, 2 1, 3 0, 4 1, 5 0, 6 1, 7 0, 8 1, 9 0, 10 1))'); +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. +-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t order by ord; +POLYGON((1 1,2 2,3 3,1 1)) [[(1,1),(2,2),(3,3),(1,1)]] 1 +POLYGON((1 1,2 2,3 3,1 1),(1 0,2 0,3 0,1 0)) [[(1,1),(2,2),(3,3),(1,1)],[(1,0),(2,0),(3,0)]] 1 +POLYGON((1 0,2 1,3 0,4 1,5 0,6 1,7 0,8 1,9 0,10 1,1 0)) [[(1,0),(2,1),(3,0),(4,1),(5,0),(6,1),(7,0),(8,1),(9,0),(10,1)]] 1 diff --git a/tests/queries/0_stateless/03215_multilinestring_geometry.sql b/tests/queries/0_stateless/03215_multilinestring_geometry.sql new file mode 100644 index 00000000000..cf4ef15f63d --- /dev/null +++ b/tests/queries/0_stateless/03215_multilinestring_geometry.sql @@ -0,0 +1,26 @@ +-- { echoOn } +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); + +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); + +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. 
+WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x +SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); + +-- Non constant tests + +DROP TABLE IF EXISTS t; +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String, ord Float64) Engine = Memory; +INSERT INTO t (ord, shape, wkt_string) VALUES (1, [[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (2, [[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (3, [[(1, 0), (2, 1), (3, 0), (4, 1), (5, 0), (6, 1), (7, 0), (8, 1), (9, 0), (10, 1)]], 'MULTILINESTRING ((1 0, 2 1, 3 0, 4 1, 5 0, 6 1, 7 0, 8 1, 9 0, 10 1))'); + +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. +-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t order by ord; + + diff --git a/tests/queries/0_stateless/03217_filtering_in_storage_merge.reference b/tests/queries/0_stateless/03217_filtering_in_storage_merge.reference new file mode 100644 index 00000000000..d366ad04c39 --- /dev/null +++ b/tests/queries/0_stateless/03217_filtering_in_storage_merge.reference @@ -0,0 +1,6 @@ +Expression ((Project names + Projection)) + Aggregating + Expression (Before GROUP BY) + ReadFromMerge + Filter (( + ( + ))) + ReadFromMergeTree (default.test_03217_merge_replica_1) diff --git a/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql b/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql new file mode 100644 index 00000000000..42d31e95f9c --- /dev/null +++ b/tests/queries/0_stateless/03217_filtering_in_storage_merge.sql @@ -0,0 +1,16 @@ +CREATE TABLE test_03217_merge_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_merge_replica', 'r1') + ORDER BY x; +CREATE TABLE test_03217_merge_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_merge_replica', 'r2') + ORDER BY x; + + +CREATE TABLE test_03217_all_replicas (x UInt32) + ENGINE = Merge(currentDatabase(), 'test_03217_merge_replica_*'); + +INSERT INTO test_03217_merge_replica_1 SELECT number AS x FROM numbers(10); +SYSTEM SYNC REPLICA test_03217_merge_replica_2; + +-- If the filter on _table is not applied, then the plan will show both replicas +EXPLAIN SELECT _table, count() FROM test_03217_all_replicas WHERE _table = 'test_03217_merge_replica_1' AND x >= 0 GROUP BY _table SETTINGS allow_experimental_analyzer=1; diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.reference b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference new file mode 100644 index 00000000000..c0761c3f689 --- /dev/null +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.reference @@ -0,0 +1,6 @@ +information_schema tables +both default test_03217_system_tables_replica_1 r1 +both default test_03217_system_tables_replica_2 r2 +default test_03217_system_tables_replica_1 r1 +1 +1 diff --git a/tests/queries/0_stateless/03217_filtering_in_system_tables.sql b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql new file mode 100644 index 00000000000..eb506dfe39a --- /dev/null +++ b/tests/queries/0_stateless/03217_filtering_in_system_tables.sql @@ -0,0 
+1,30 @@ +-- If filtering is not done correctly on databases, then this query report to read 3 rows, which are: `system.tables`, `information_schema.tables` and `INFORMATION_SCHEMA.tables` +SELECT database, table FROM system.tables WHERE database = 'information_schema' AND table = 'tables'; + +CREATE TABLE test_03217_system_tables_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r1') + ORDER BY x; +CREATE TABLE test_03217_system_tables_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r2') + ORDER BY x; + +-- Make sure we can read both replicas +-- The replica name might be altered because of `_functional_tests_helper_database_replicated_replace_args_macros`, +-- thus we need to use `left` +SELECT 'both', database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase(); +-- If filtering is not done correctly on database-table column, then this query report to read 2 rows, which are the above tables +SELECT database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name LIKE 'r1%'; +SYSTEM FLUSH LOGS; +-- argMax is necessary to make the test repeatable + +-- StorageSystemTables +SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND current_database = currentDatabase() + AND query LIKE '%SELECT database, table FROM system.tables WHERE database = \'information_schema\' AND table = \'tables\';' + AND type = 'QueryFinish'; + +-- StorageSystemReplicas +SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND current_database = currentDatabase() + AND query LIKE '%SELECT database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name LIKE \'r1\%\';' + AND type = 'QueryFinish'; diff --git a/tests/queries/0_stateless/03218_materialize_msan.reference b/tests/queries/0_stateless/03218_materialize_msan.reference new file mode 100644 index 00000000000..eb84f35f9f4 --- /dev/null +++ b/tests/queries/0_stateless/03218_materialize_msan.reference @@ -0,0 +1 @@ +[(NULL,'11\01111111\011111','1111')] -2147483648 \N diff --git a/tests/queries/0_stateless/03218_materialize_msan.sql b/tests/queries/0_stateless/03218_materialize_msan.sql new file mode 100644 index 00000000000..7e7043e687b --- /dev/null +++ b/tests/queries/0_stateless/03218_materialize_msan.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; + +SELECT + materialize([(NULL, '11\01111111\011111', '1111')]) AS t, + (t[1048576]).2, + materialize(-2147483648), + (t[-2147483648]).1 +GROUP BY + materialize([(NULL, '1')]), + '', + (materialize((t[1023]).2), (materialize(''), (t[2147483647]).1, materialize(9223372036854775807)), (materialize(''), materialize(NULL, 2147483647, t[65535], 256)), materialize(NULL)) +; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT + materialize([(NULL, '11\01111111\011111', '1111')]) AS t, + (t[1048576]).2, + materialize(-2147483648), + (t[-2147483648]).1 +GROUP BY + materialize([(NULL, '1')]), + '', + (materialize((t[1023]).2), (materialize(''), (t[2147483647]).1, materialize(9223372036854775807)), (materialize(''), materialize(NULL), materialize(2147483647), materialize(t[65535]), materialize(256)), materialize(NULL)) +; diff --git a/tests/queries/0_stateless/03221_merge_profile_events.reference 
b/tests/queries/0_stateless/03221_merge_profile_events.reference new file mode 100644 index 00000000000..d969717336b --- /dev/null +++ b/tests/queries/0_stateless/03221_merge_profile_events.reference @@ -0,0 +1,3 @@ +Horizontal 1 20000 3 0 480000 1 1 1 1 +Vertical 1 20000 1 2 480000 1 1 1 1 1 1 +Vertical 2 400000 2 6 12800000 1 1 1 1 1 1 1 1 1 1 diff --git a/tests/queries/0_stateless/03221_merge_profile_events.sql b/tests/queries/0_stateless/03221_merge_profile_events.sql new file mode 100644 index 00000000000..1aa3dd266f8 --- /dev/null +++ b/tests/queries/0_stateless/03221_merge_profile_events.sql @@ -0,0 +1,90 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +CREATE TABLE t_merge_profile_events_1 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_1 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0 +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_1' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +CREATE TABLE t_merge_profile_events_2 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_2 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_2' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +DROP TABLE IF EXISTS t_merge_profile_events_3; + +CREATE TABLE t_merge_profile_events_3 (id UInt64, v1 UInt64, v2 UInt64, PROJECTION p (SELECT v2, v2 * v2, v2 * 2, v2 * 10, v1 ORDER BY v1)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); +INSERT INTO t_merge_profile_events_3 
SELECT number, number, number FROM numbers(100000); + +OPTIMIZE TABLE t_merge_profile_events_3 FINAL; +SYSTEM FLUSH LOGS; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageTotalMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] <= duration_ms, + ProfileEvents['MergeTotalMilliseconds'] <= duration_ms +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_3' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_3; diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.reference b/tests/queries/0_stateless/03221_mutate_profile_events.reference new file mode 100644 index 00000000000..d094c37ff88 --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.reference @@ -0,0 +1,2 @@ +3 2 1 10000 160000 0 1 1 1 +4 2 1 10000 320000 1 0 1 1 diff --git a/tests/queries/0_stateless/03221_mutate_profile_events.sql b/tests/queries/0_stateless/03221_mutate_profile_events.sql new file mode 100644 index 00000000000..e9f7f9670bd --- /dev/null +++ b/tests/queries/0_stateless/03221_mutate_profile_events.sql @@ -0,0 +1,33 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_mutate_profile_events; + +CREATE TABLE t_mutate_profile_events (key UInt64, id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id PARTITION BY key +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_mutate_profile_events SELECT 1, number, number, number FROM numbers(10000); +INSERT INTO t_mutate_profile_events SELECT 2, number, number, number FROM numbers(10000); + +SET mutations_sync = 2; + +ALTER TABLE t_mutate_profile_events UPDATE v1 = 1000 WHERE key = 1; +ALTER TABLE t_mutate_profile_events DELETE WHERE key = 2 AND v2 % 10 = 0; + +SYSTEM FLUSH LOGS; + +SELECT + splitByChar('_', part_name)[-1] AS version, + sum(ProfileEvents['MutationTotalParts']), + sum(ProfileEvents['MutationUntouchedParts']), + sum(ProfileEvents['MutatedRows']), + sum(ProfileEvents['MutatedUncompressedBytes']), + sum(ProfileEvents['MutationAllPartColumns']), + sum(ProfileEvents['MutationSomePartColumns']), + sum(ProfileEvents['MutationTotalMilliseconds']) > 0, + sum(ProfileEvents['MutationExecuteMilliseconds']) > 0, +FROM system.part_log +WHERE database = currentDatabase() AND table = 't_mutate_profile_events' AND event_type = 'MutatePart' +GROUP BY version ORDER BY version; + +DROP TABLE IF EXISTS t_mutate_profile_events diff --git a/tests/queries/0_stateless/03222_create_timeseries_table.reference b/tests/queries/0_stateless/03222_create_timeseries_table.reference new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/queries/0_stateless/03222_create_timeseries_table.sql b/tests/queries/0_stateless/03222_create_timeseries_table.sql new file mode 100644 index 00000000000..bdb29e7d366 --- /dev/null +++ b/tests/queries/0_stateless/03222_create_timeseries_table.sql @@ -0,0 +1,7 @@ +SET 
+
+CREATE TABLE 03222_timeseries_table1 ENGINE = TimeSeries FORMAT Null;
+CREATE TABLE 03222_timeseries_table2 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 1, aggregate_min_time_and_max_time = 1 FORMAT Null;
+--- This doesn't work because allow_nullable_key cannot be set in query for the internal MergeTree tables
+--- CREATE TABLE 03222_timeseries_table3 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 1, aggregate_min_time_and_max_time = 0;
+CREATE TABLE 03222_timeseries_table4 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 0 FORMAT Null;
diff --git a/tests/queries/0_stateless/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/array=[1,2,3]/float=42.42/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column0=Elizabeth/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Gordon/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/column0=Elizabeth/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv b/tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv
new file mode 100644
index 00000000000..936d995cc64
--- /dev/null
+++ b/tests/queries/0_stateless/data_hive/partitioning/identifier=2070/email.csv
@@ -0,0 +1,5 @@
+_login_email,_identifier,_first_name,_last_name
+laura@example.com,2070,Laura,Grey
+craig@example.com,4081,Craig,Johnson
+mary@example.com,9346,Mary,Jenkins
+jamie@example.com,5079,Jamie,Smith
diff --git a/tests/queries/0_stateless/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/non_existing_column=Elizabeth/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet b/tests/queries/0_stateless/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_hive/partitioning/number=42/date=2020-01-01/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Gordon/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/column1=Schmidt/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_minio/hive_partitioning/column0=Elizabeth/sample.parquet differ
diff --git a/tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet b/tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet
new file mode 100644
index 00000000000..9b6a78cf8cc
Binary files /dev/null and b/tests/queries/0_stateless/data_minio/hive_partitioning/non_existing_column=Elizabeth/sample.parquet differ
diff --git a/utils/check-style/aspell-ignore/en/aspell-dict.txt b/utils/check-style/aspell-ignore/en/aspell-dict.txt
index 469c0a5fc5f..ce7c666912e 100644
--- a/utils/check-style/aspell-ignore/en/aspell-dict.txt
+++ b/utils/check-style/aspell-ignore/en/aspell-dict.txt
@@ -1,4 +1,4 @@
-personal_ws-1.1 en 2942
+personal_ws-1.1 en 2942
 AArch
 ACLs
 ALTERs
@@ -562,6 +562,7 @@ MindsDB
 Mongodb
 Monotonicity
 MsgPack
+MultiLineString
 MultiPolygon
 Multiline
 Multiqueries
@@ -1701,6 +1702,8 @@ getOSKernelVersion
 getServerPort
 getSetting
 getSizeOfEnumType
+getSubcolumn
+getTypeSerializationStreams
 getblockinfo
 getevents
 ghcnd
@@ -2090,6 +2093,7 @@ multiSearchFirstPositionUTF
 multibyte
 multidirectory
 multiline
+multilinestring
 multiplyDecimal
 multipolygon
 multisearchany
@@ -2113,6 +2117,7 @@ namenode
 namepassword
 nameprofile
 namequota
+namespace
 namespaces
 natively
 nats
@@ -2367,6 +2372,7 @@ rankCorr
 rapidjson
 rawblob
 readWKTLineString
+readWKTMultiLineString
 readWKTMultiPolygon
 readWKTPoint
 readWKTPolygon
diff --git a/utils/list-versions/version_date.tsv b/utils/list-versions/version_date.tsv
index f46353277e2..71a4a722a36 100644
--- a/utils/list-versions/version_date.tsv
+++ b/utils/list-versions/version_date.tsv
@@ -13,6 +13,7 @@ v24.4.4.113-stable 2024-08-02
 v24.4.3.25-stable 2024-06-14
 v24.4.2.141-stable 2024-06-07
 v24.4.1.2088-stable 2024-05-01
+v24.3.7.30-lts 2024-08-14
 v24.3.6.48-lts 2024-08-02
 v24.3.5.46-lts 2024-07-03
 v24.3.4.147-lts 2024-06-13