Merge branch 'ClickHouse:master' into document_isXYZ

Shaun Struwig 2024-05-08 22:04:38 +02:00 committed by GitHub
commit 4c9cc50781
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
233 changed files with 3389 additions and 1271 deletions

View File

@@ -135,23 +135,21 @@ endif ()
include (cmake/check_flags.cmake)
include (cmake/add_warning.cmake)
if (COMPILER_CLANG)
# generate ranges for fast "addr2line" search
if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
# NOTE: clang has a bug: it does not emit .debug_aranges
# with ThinLTO, so a custom ld.lld wrapper is shipped in docker images.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -gdwarf-aranges")
endif ()
# See https://blog.llvm.org/posts/2021-04-05-constructor-homing-for-debug-info/
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Xclang -fuse-ctor-homing")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Xclang -fuse-ctor-homing")
endif()
no_warning(enum-constexpr-conversion) # breaks Protobuf in clang-16
option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
@@ -284,16 +282,12 @@ endif ()
option (ENABLE_BUILD_PROFILING "Enable profiling of build time" OFF)
if (ENABLE_BUILD_PROFILING)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ftime-trace")
if (LINKER_NAME MATCHES "lld")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--time-trace")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -Wl,--time-trace")
endif ()
endif ()
set (CMAKE_CXX_STANDARD 23)
@@ -304,22 +298,20 @@ set (CMAKE_C_STANDARD 11)
set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
set (CMAKE_C_STANDARD_REQUIRED ON)
if (COMPILER_CLANG)
# Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
# See https://reviews.llvm.org/D112921
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
# falign-functions=32 prevents random performance regressions when the code changes, thus providing more stable
# benchmarks.
set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
if (ARCH_AMD64)
# align branches within a 32-byte boundary to avoid the potential performance loss when the code layout changes,
# which makes benchmark results more stable.
set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
endif()
# Disable floating-point expression contraction in order to get consistent floating point calculation results across platforms
set (COMPILER_FLAGS "${COMPILER_FLAGS} -ffp-contract=off")
@@ -348,39 +340,34 @@ set (CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${COMPILER_FLAGS} $
set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O0 ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (COMPILER_CLANG)
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()
# Display absolute paths in error messages. Otherwise KDevelop fails to navigate to the correct file and opens a new file instead.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fdiagnostics-absolute-paths")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fdiagnostics-absolute-paths")
if (NOT ENABLE_TESTS AND NOT SANITIZE AND NOT SANITIZE_COVERAGE AND OS_LINUX)
# https://clang.llvm.org/docs/ThinLTO.html
# Applies to clang and linux only.
# Disabled when building with tests or sanitizers.
option(ENABLE_THINLTO "Clang-specific link time optimization" ON)
endif()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrict-vtable-pointers")
# We cannot afford to use LTO when compiling unit tests, and it's not enough
# to only supply -fno-lto at the final linking stage. So we disable it
# completely.
if (ENABLE_THINLTO AND NOT ENABLE_TESTS AND NOT SANITIZE)
# Link time optimization
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
set (CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} -flto=thin -fwhole-program-vtables")
elseif (ENABLE_THINLTO)
message (${RECONFIGURE_MESSAGE_LEVEL} "Cannot enable ThinLTO")
endif ()
# Turns on all external libs like s3, kafka, ODBC, ...

View File

@@ -5,17 +5,15 @@ set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
if (COMPILER_CLANG)
execute_process (COMMAND ${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
# Apparently, in clang-19, the UBSan support library for C++ was moved out into ubsan_standalone_cxx.a, so we have to include both.
if (SANITIZE STREQUAL undefined)
string(REPLACE "builtins.a" "ubsan_standalone_cxx.a" EXTRA_BUILTINS_LIBRARY "${BUILTINS_LIBRARY}")
endif ()
if (NOT EXISTS "${BUILTINS_LIBRARY}")
set (BUILTINS_LIBRARY "-lgcc")
endif ()
if (OS_ANDROID)

View File

@@ -26,9 +26,7 @@ if (SANITIZE)
elseif (SANITIZE STREQUAL "thread")
set (TSAN_FLAGS "-fsanitize=thread")
set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/tsan_ignorelist.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${TSAN_FLAGS}")
@@ -44,9 +42,7 @@ if (SANITIZE)
# that's why we often receive reports about UIO. The simplest way to avoid this is just set this flag here.
set(UBSAN_FLAGS "${UBSAN_FLAGS} -fno-sanitize=unsigned-integer-overflow")
endif()
set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-ignorelist=${PROJECT_SOURCE_DIR}/tests/ubsan_ignorelist.txt")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")

View File

@@ -1,10 +1,6 @@
# Compiler
if (NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")
message (FATAL_ERROR "Compiler ${CMAKE_CXX_COMPILER_ID} is not supported")
endif ()
@@ -17,30 +13,26 @@ set (CLANG_MINIMUM_VERSION 16)
set (XCODE_MINIMUM_VERSION 12.0)
set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
if (COMPILER_CLANG)
if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
# (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
if (NOT ALLOW_APPLECLANG AND NOT DEFINED ENV{ALLOW_APPLECLANG})
message (FATAL_ERROR "Compilation with AppleClang is unsupported. Please use vanilla Clang, e.g. from Homebrew.")
endif ()
# For a mapping between XCode / AppleClang / vanilla Clang versions, see https://en.wikipedia.org/wiki/Xcode
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with AppleClang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
endif ()
else ()
if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${CLANG_MINIMUM_VERSION})
message (FATAL_ERROR "Compilation with Clang version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${CLANG_MINIMUM_VERSION}.")
endif ()
endif ()
# Linker
string (REGEX MATCHALL "[0-9]+" COMPILER_VERSION_LIST ${CMAKE_CXX_COMPILER_VERSION})
list (GET COMPILER_VERSION_LIST 0 COMPILER_VERSION_MAJOR)
# Example values: `lld-10`
option (LINKER_NAME "Linker name or full path")
if (LINKER_NAME MATCHES "gold")
@@ -48,19 +40,15 @@ if (LINKER_NAME MATCHES "gold")
endif ()
if (NOT LINKER_NAME)
if (OS_LINUX AND NOT ARCH_S390X)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
elseif (OS_DARWIN)
find_program (LLD_PATH NAMES "ld")
endif ()
if (LLD_PATH)
if (OS_LINUX OR OS_DARWIN)
# Clang driver simply allows full linker path.
set (LINKER_NAME ${LLD_PATH})
endif ()
endif()
endif()
@@ -82,47 +70,28 @@ else ()
endif ()
# Archiver
find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
if (LLVM_AR_PATH)
set (CMAKE_AR "${LLVM_AR_PATH}")
endif ()
message(STATUS "Using archiver: ${CMAKE_AR}")
# Ranlib
find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
if (LLVM_RANLIB_PATH)
set (CMAKE_RANLIB "${LLVM_RANLIB_PATH}")
endif ()
message(STATUS "Using ranlib: ${CMAKE_RANLIB}")
# Install Name Tool
find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
if (LLVM_INSTALL_NAME_TOOL_PATH)
set (CMAKE_INSTALL_NAME_TOOL "${LLVM_INSTALL_NAME_TOOL_PATH}")
endif ()
message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}")
# Objcopy
find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
if (OBJCOPY_PATH)
message (STATUS "Using objcopy: ${OBJCOPY_PATH}")
else ()
@@ -130,11 +99,7 @@ else ()
endif ()
# Strip
find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
if (STRIP_PATH)
message (STATUS "Using strip: ${STRIP_PATH}")
else ()

View File

@@ -15,37 +15,35 @@ if ((NOT CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG") AND (NOT SANITIZE) AND (NOT CMAKE
add_warning(frame-larger-than=65536)
endif ()
# Add some warnings that are not available even with -Wall -Wextra -Wpedantic.
# We want to get everything out of the compiler for code quality.
add_warning(everything)
add_warning(pedantic)
no_warning(zero-length-array)
no_warning(c++98-compat-pedantic)
no_warning(c++98-compat)
no_warning(c++20-compat) # Use constinit in C++20 without warnings
no_warning(sign-conversion)
no_warning(implicit-int-conversion)
no_warning(implicit-int-float-conversion)
no_warning(ctad-maybe-unsupported) # clang 9+, linux-only
no_warning(disabled-macro-expansion)
no_warning(documentation-unknown-command)
no_warning(double-promotion)
no_warning(exit-time-destructors)
no_warning(float-equal)
no_warning(global-constructors)
no_warning(missing-prototypes)
no_warning(missing-variable-declarations)
no_warning(padded)
no_warning(switch-enum)
no_warning(undefined-func-template)
no_warning(unused-template)
no_warning(vla)
no_warning(weak-template-vtables)
no_warning(weak-vtables)
no_warning(thread-safety-negative) # experimental flag, too many false positives
no_warning(enum-constexpr-conversion) # breaks magic-enum library in clang-16
no_warning(unsafe-buffer-usage) # too aggressive
no_warning(switch-default) # conflicts with "defaults in a switch covering all enum values"
# TODO Enable conversion, sign-conversion, double-promotion warnings.

View File

@@ -81,9 +81,7 @@ set (CAPNPC_SRCS
add_library(_capnpc ${CAPNPC_SRCS})
target_link_libraries(_capnpc PUBLIC _capnp)
set (CAPNP_PRIVATE_CXX_FLAGS -fno-char8_t)
target_compile_options(_kj PRIVATE ${CAPNP_PRIVATE_CXX_FLAGS})
target_compile_options(_capnp PRIVATE ${CAPNP_PRIVATE_CXX_FLAGS})

View File

@@ -91,12 +91,10 @@ set(LIB_SOVERSION ${VERSION_MAJOR})
enable_language(ASM)
if(COMPILER_CLANG)
add_definitions(-Wno-unused-command-line-argument)
# Note that the s390x build uses the mold linker
if(NOT ARCH_S390X)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=lld") # only relevant for -DENABLE_OPENSSL_DYNAMIC=1
endif()
if(ARCH_AMD64)

View File

@@ -1,4 +1,4 @@
if (NOT OS_FREEBSD AND NOT OS_DARWIN)
option (ENABLE_SENTRY "Enable Sentry" ${ENABLE_LIBRARIES})
else()
option (ENABLE_SENTRY "Enable Sentry" OFF)

View File

@@ -127,9 +127,6 @@ function setup_logs_replication
echo 'Create all configured system logs'
clickhouse-client --query "SYSTEM FLUSH LOGS"
# It doesn't make sense to try creating tables if SYNC fails
echo "SYSTEM SYNC DATABASE REPLICA default" | clickhouse-client "${CONNECTION_ARGS[@]}" || return 0
debug_or_sanitizer_build=$(clickhouse-client -q "WITH ((SELECT value FROM system.build_options WHERE name='BUILD_TYPE') AS build, (SELECT value FROM system.build_options WHERE name='CXX_FLAGS') as flags) SELECT build='Debug' OR flags LIKE '%fsanitize%'")
echo "Build is debug or sanitizer: $debug_or_sanitizer_build"
@@ -143,7 +140,7 @@ function setup_logs_replication
time DateTime COMMENT 'The time of test run',
test_name String COMMENT 'The name of the test',
coverage Array(UInt64) COMMENT 'An array of addresses of the code (a subset of addresses instrumented for coverage) that were encountered during the test run'
) ENGINE = MergeTree ORDER BY test_name COMMENT 'Contains information about per-test coverage from the CI, but used only for exporting to the CI cluster'
"
# For each system log table:

View File

@@ -31,6 +31,11 @@
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
<!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
<allow_experimental_object_type>
<readonly/>
</allow_experimental_object_type>
</constraints>
</default>
</profiles>
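For illustration, a minimal sketch of what these `readonly` constraints do (assuming a session running under the `default` profile above; the exact error name is my assumption): any attempt to change a constrained setting is rejected by the server.
```sql
-- Hypothetical session under the profile above: the readonly constraint
-- makes the setting immutable, so this statement fails (typically with a
-- SETTING_CONSTRAINT_VIOLATION error) instead of enabling the feature.
SET allow_experimental_object_type = 1;
```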

View File

@@ -120,13 +120,41 @@ EOL
local max_users_mem
max_users_mem=$((total_mem*30/100)) # 30%
# Similar to docker/test/fuzzer/query-fuzzer-tweaks-users.xml
echo "Setting max_memory_usage_for_user=$max_users_mem and max_memory_usage for queries to 10G"
cat > /etc/clickhouse-server/users.d/stress_test_tweaks-users.xml <<EOL
<clickhouse>
<profiles>
<default>
<max_execution_time>60</max_execution_time>
<max_memory_usage>10G</max_memory_usage>
<max_memory_usage_for_user>${max_users_mem}</max_memory_usage_for_user>
<table_function_remote_max_addresses>200</table_function_remote_max_addresses>
<constraints>
<max_execution_time>
<max>60</max>
</max_execution_time>
<max_memory_usage>
<max>10G</max>
</max_memory_usage>
<table_function_remote_max_addresses>
<max>200</max>
</table_function_remote_max_addresses>
<!-- Don't waste cycles testing the old interpreter. Spend time in the new analyzer instead -->
<allow_experimental_analyzer>
<readonly/>
</allow_experimental_analyzer>
<!-- This feature is broken, deprecated and will be removed. We don't want more reports about it -->
<allow_experimental_object_type>
<readonly/>
</allow_experimental_object_type>
</constraints>
</default>
</profiles>
</clickhouse>

View File

@@ -5,22 +5,13 @@ title: How to Build, Run and Debug ClickHouse on Linux for s390x (zLinux)
sidebar_label: Build on Linux for s390x (zLinux)
---
At the time of writing (May 2024), support for the s390x platform is considered experimental, i.e. some features are disabled or broken on s390x.
## Building
:::note
s390x builds are temporarily disabled in CI.
:::
s390x has two OpenSSL-related build options:
- By default, OpenSSL is built on s390x as a shared library. This is different from all other platforms, where OpenSSL is built as a static library.
- To build OpenSSL as a static library regardless, pass `-DENABLE_OPENSSL_DYNAMIC=0` to CMake.
These instructions assume that the host machine is x86_64 and has all the tooling required to build natively based on the [build instructions](../development/build.md). It also assumes that the host is Ubuntu 22.04 but the following instructions should also work on Ubuntu 20.04.
@@ -31,11 +22,16 @@ apt-get install binutils-s390x-linux-gnu libc6-dev-s390x-cross gcc-s390x-linux-g
```
If you wish to cross-compile Rust code, install the Rust cross-compile target for s390x:
```bash
rustup target add s390x-unknown-linux-gnu
```
The s390x build uses the mold linker; download it from https://github.com/rui314/mold/releases/download/v2.0.0/mold-2.0.0-x86_64-linux.tar.gz
and place it in your `$PATH`.
To build for s390x:
```bash
cmake -DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-s390x.cmake ..
ninja

View File

@@ -73,6 +73,7 @@ Optional parameters:
- `rabbitmq_queue_consume` - Use user-defined queues and do not make any RabbitMQ setup: declaring exchanges, queues, bindings. Default: `false`.
- `rabbitmq_username` - RabbitMQ username.
- `rabbitmq_password` - RabbitMQ password.
- `reject_unhandled_messages` - Reject messages (send a RabbitMQ negative acknowledgement) in case of errors. This setting is automatically enabled if an `x-dead-letter-exchange` is defined in `rabbitmq_queue_settings_list` (see the sketch after this list).
- `rabbitmq_commit_on_select` - Commit messages when select query is made. Default: `false`.
- `rabbitmq_max_rows_per_message` — The maximum number of rows written in one RabbitMQ message for row-based formats. Default : `1`.
- `rabbitmq_empty_queue_backoff_start` — A start backoff point to reschedule read if the rabbitmq queue is empty.
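As a sketch of how these settings combine (the table name, exchange names, and broker address below are illustrative, not from this page), a table that declares a dead-letter exchange via `rabbitmq_queue_settings_list`, which in turn auto-enables `reject_unhandled_messages`:
```sql
CREATE TABLE rabbitmq_events
(
    payload String
)
ENGINE = RabbitMQ
SETTINGS
    rabbitmq_host_port = 'localhost:5672',
    rabbitmq_exchange_name = 'events',
    rabbitmq_format = 'JSONEachRow',
    rabbitmq_queue_settings_list = 'x-dead-letter-exchange=events-dlx';
-- Because x-dead-letter-exchange is set above, reject_unhandled_messages is
-- enabled automatically: messages that fail are nacked and dead-lettered.
```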

View File

@@ -22,9 +22,8 @@ ORDER BY Distance(vectors, Point)
LIMIT N
```
`vectors` contains N-dimensional values of type [Array(Float32)](../../../sql-reference/data-types/array.md), for example embeddings.
Function `Distance` computes the distance between two vectors. Often, the Euclidean (L2) distance is chosen as distance function but [other
distance functions](/docs/en/sql-reference/functions/distance-functions.md) are also possible. `Point` is the reference point, e.g. `(0.17,
0.33, ...)`, and `N` limits the number of search results.
@@ -47,7 +46,7 @@ of the search space (using clustering, search trees, etc.) which allows to compu
# Creating and Using ANN Indexes {#creating_using_ann_indexes}
Syntax to create an ANN index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table_with_ann_index
@@ -60,19 +59,6 @@ ENGINE = MergeTree
ORDER BY id;
```
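A usage sketch under the syntax above (assuming three-dimensional `Array(Float32)` values in `vectors`; the data values are made up):
```sql
INSERT INTO table_with_ann_index VALUES (1, [0.1, 0.2, 0.3]), (2, [0.5, 0.1, 0.9]);

-- The ANN index can serve this query because it matches the required
-- ORDER BY Distance(...) LIMIT N shape:
SELECT id
FROM table_with_ann_index
ORDER BY L2Distance(vectors, [0.17, 0.33, 0.25])
LIMIT 2;
```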
ANN indexes are built during column insertion and merge. As a result, `INSERT` and `OPTIMIZE` statements will be slower than for ordinary
tables. ANN indexes are ideally used only with immutable or rarely changed data, i.e. when there are far more read requests than write
requests.
@@ -164,7 +150,7 @@ linear surfaces (lines in 2D, planes in 3D etc.).
</iframe>
</div>
Syntax to create an Annoy index over an [Array(Float32)](../../../sql-reference/data-types/array.md) column:
```sql
CREATE TABLE table_with_annoy_index
@@ -177,19 +163,6 @@ ENGINE = MergeTree
ORDER BY id;
```
Annoy currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).
@@ -203,10 +176,9 @@ Parameter `NumTrees` is the number of trees which the algorithm creates (default
more accurate search results but slower index creation / query times (approximately linearly) as well as larger index sizes.
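For example (a sketch; the table name, distance function, and tree count are arbitrary choices, not recommendations):
```sql
CREATE TABLE table_with_tuned_annoy_index
(
    id Int64,
    vectors Array(Float32),
    -- cosine distance instead of the default L2Distance, and 200 trees
    -- instead of the default tree count:
    INDEX annoy_idx vectors TYPE annoy('cosineDistance', 200) GRANULARITY 1
)
ENGINE = MergeTree
ORDER BY id;
```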
:::note
All arrays must have the same length. To avoid errors, you can use a
[CONSTRAINT](/docs/en/sql-reference/statements/create/table.md#constraints), for example, `CONSTRAINT constraint_name_1 CHECK
length(vectors) = 256`. Also, empty `Arrays` and unspecified `Array` values in INSERT statements (i.e. default values) are not supported.
:::
The creation of Annoy indexes (whenever a new part is built, e.g. at the end of a merge) is a relatively slow process. You can increase
@@ -264,19 +236,6 @@ ENGINE = MergeTree
ORDER BY id;
```
USearch currently supports two distance functions:
- `L2Distance`, also called Euclidean distance, is the length of a line segment between two points in Euclidean space
([Wikipedia](https://en.wikipedia.org/wiki/Euclidean_distance)).

View File

@@ -2,7 +2,7 @@
slug: /en/engines/table-engines/mergetree-family/invertedindexes
sidebar_label: Full-text Indexes
description: Quickly find search terms in text.
keywords: [full-text search, text search, index, indices]
---
# Full-text Search using Full-text Indexes [experimental]
@@ -53,6 +53,10 @@ ENGINE = MergeTree
ORDER BY key
```
:::note
In earlier versions of ClickHouse, the corresponding index type name was `inverted`.
:::
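For instance, a sketch of a query that such an index can accelerate (assuming a table `tab` with a `String` column `str` carrying a `full_text` index; the names are illustrative):
```sql
SELECT count() FROM tab WHERE hasToken(str, 'ClickHouse');
```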
where `N` specifies the tokenizer:
- `full_text(0)` (or shorter: `full_text()`) sets the tokenizer to "tokens", i.e. splits strings along spaces,

View File

@@ -494,7 +494,7 @@ Syntax: `tokenbf_v1(size_of_bloom_filter_in_bytes, number_of_hash_functions, ran
#### Special-purpose
- Experimental indexes to support approximate nearest neighbor (ANN) search. See [here](annindexes.md) for details.
- An experimental full-text index to support full-text search. See [here](invertedindexes.md) for details.
### Functions Support {#functions-support}
@@ -502,31 +502,31 @@ Conditions in the `WHERE` clause contain calls of the functions that operate wi
Indexes of type `set` can be utilized by all functions. The other index types are supported as follows:
| Function (operator) / Index | primary key | minmax | ngrambf_v1 | tokenbf_v1 | bloom_filter | full_text |
|------------------------------------------------------------------------------------------------------------|-------------|--------|------------|------------|--------------|-----------|
| [equals (=, ==)](/docs/en/sql-reference/functions/comparison-functions.md/#equals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notEquals(!=, &lt;&gt;)](/docs/en/sql-reference/functions/comparison-functions.md/#notequals) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [like](/docs/en/sql-reference/functions/string-search-functions.md/#like) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [notLike](/docs/en/sql-reference/functions/string-search-functions.md/#notlike) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [match](/docs/en/sql-reference/functions/string-search-functions.md/#match) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
| [startsWith](/docs/en/sql-reference/functions/string-functions.md/#startswith) | ✔ | ✔ | ✔ | ✔ | ✗ | ✔ |
| [endsWith](/docs/en/sql-reference/functions/string-functions.md/#endswith) | ✗ | ✗ | ✔ | ✔ | ✗ | ✔ |
| [multiSearchAny](/docs/en/sql-reference/functions/string-search-functions.md/#multisearchany) | ✗ | ✗ | ✔ | ✗ | ✗ | ✔ |
| [in](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [notIn](/docs/en/sql-reference/functions/in-functions) | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ |
| [less (<)](/docs/en/sql-reference/functions/comparison-functions.md/#less) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greater (>)](/docs/en/sql-reference/functions/comparison-functions.md/#greater) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [lessOrEquals (<=)](/docs/en/sql-reference/functions/comparison-functions.md/#lessorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [greaterOrEquals (>=)](/docs/en/sql-reference/functions/comparison-functions.md/#greaterorequals) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [empty](/docs/en/sql-reference/functions/array-functions/#empty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [notEmpty](/docs/en/sql-reference/functions/array-functions/#notempty) | ✔ | ✔ | ✗ | ✗ | ✗ | ✗ |
| [has](/docs/en/sql-reference/functions/array-functions/#has) | ✗ | ✗ | ✔ | ✔ | ✔ | ✔ |
| [hasAny](/docs/en/sql-reference/functions/array-functions/#hasany) | ✗ | ✗ | ✔ | ✔ | ✔ | ✗ |
| [hasAll](/docs/en/sql-reference/functions/array-functions/#hasall) | ✗ | ✗ | ✗ | ✗ | ✔ | ✗ |
| hasToken | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenOrNull | ✗ | ✗ | ✗ | ✔ | ✗ | ✔ |
| hasTokenCaseInsensitive (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
| hasTokenCaseInsensitiveOrNull (*) | ✗ | ✗ | ✗ | ✔ | ✗ | ✗ |
Functions with a constant argument that is less than the ngram size can't be used by `ngrambf_v1` for query optimization.
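To make the table concrete, a small sketch (index and column names are illustrative): per the rows above, `hasToken` can be served by both `tokenbf_v1` and `full_text` indexes, while `hasTokenCaseInsensitive` is only served by `tokenbf_v1`.
```sql
ALTER TABLE tab ADD INDEX tok_idx str TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4;

-- Can use tok_idx (and a full_text index, if one existed):
SELECT count() FROM tab WHERE hasToken(str, 'error');
-- Can use tok_idx only; a full_text index would not help here:
SELECT count() FROM tab WHERE hasTokenCaseInsensitive(str, 'Error');
```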

View File

@@ -207,7 +207,7 @@ SELECT * FROM nestedt FORMAT TSV
Differs from `TabSeparated` format in that the rows are written without escaping.
When parsing with this format, tabs or linefeeds are not allowed in each field.
This format is also available under the names `TSVRaw`, `Raw`.
## TabSeparatedWithNames {#tabseparatedwithnames}
@@ -242,14 +242,14 @@ This format is also available under the name `TSVWithNamesAndTypes`.
Differs from `TabSeparatedWithNames` format in that the rows are written without escaping.
When parsing with this format, tabs or linefeeds are not allowed in each field.
This format is also available under the names `TSVRawWithNames`, `RawWithNames`.
## TabSeparatedRawWithNamesAndTypes {#tabseparatedrawwithnamesandtypes}
Differs from `TabSeparatedWithNamesAndTypes` format in that the rows are written without escaping.
When parsing with this format, tabs or linefeeds are not allowed in each field.
This format is also available under the names `TSVRawWithNamesAndTypes`, `RawWithNamesAndTypes`.
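For example, the following two queries request the same output format (a sketch; `t` is any table):
```sql
SELECT * FROM t FORMAT TSVRaw;
SELECT * FROM t FORMAT Raw; -- alias of TSVRaw
```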
## Template {#format-template}

View File

@@ -210,6 +210,18 @@ Features:
- Pre-built metrics dashboards.
- Multiple users/projects via YAML config.
### clickhouse-monitoring {#clickhouse-monitoring}
[clickhouse-monitoring](https://github.com/duyet/clickhouse-monitoring) is a simple Next.js dashboard that relies on `system.*` tables to help monitor and provide an overview of your ClickHouse cluster.
Features:
- Query monitor: current queries, query history, query resources (memory, parts read, file_open, ...), most expensive queries, most used tables or columns, etc.
- Cluster monitor: total memory/CPU usage, distributed queue, global settings, mergetree settings, metrics, etc.
- Tables and parts information: size, row count, compression, part size, etc., down to column-level detail.
- Useful tools: Zookeeper data exploration, query EXPLAIN, kill queries, etc.
- Visualization metric charts: queries and resource usage, number of merges/mutations, merge performance, query performance, etc.
## Commercial {#commercial}
### DataGrip {#datagrip}

View File

@@ -371,6 +371,8 @@ is equal to
</s3_plain_rewritable>
```
Starting from `24.5` it is possible to configure any object storage disk (`s3`, `azure`, `local`) using the `plain_rewritable` metadata type.
### Using Azure Blob Storage {#azure-blob-storage}
`MergeTree` family table engines can store data to [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/) using a disk with type `azure_blob_storage`.

View File

@@ -14,7 +14,7 @@ The `system.part_log` table contains the following columns:
- `event_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of the event that occurred with the data part. Can have one of the following values:
- `NewPart` — Inserting of a new data part.
- `MergeParts` — Merging of data parts.
- `DownloadPart` — Downloading a data part.
- `RemovePart` — Removing or detaching a data part using [DETACH PARTITION](../../sql-reference/statements/alter/partition.md#alter_detach-partition).
- `MutatePart` — Mutating of a data part.
- `MovePart` — Moving the data part from one disk to another.
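Since `event_type` is an `Enum8`, it can be filtered by name directly; for example (a minimal sketch):
```sql
-- Parts downloaded from other replicas during the last day:
SELECT count()
FROM system.part_log
WHERE event_type = 'DownloadPart'
  AND event_time > now() - INTERVAL 1 DAY;
```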

View File

@@ -10,7 +10,7 @@ sidebar_label: Map(K, V)
**Parameters**
- `key` — The key part of the pair. Arbitrary type, except [Nullable](../../sql-reference/data-types/nullable.md) and [LowCardinality](../../sql-reference/data-types/lowcardinality.md) nested with [Nullable](../../sql-reference/data-types/nullable.md) types.
- `value` — The value part of the pair. Arbitrary type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md).
To get the value from an `a Map('key', 'value')` column, use the `a['key']` syntax. This lookup currently works with linear complexity.
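A short sketch of that lookup (the values are made up); note that a missing key returns the default value of the value type rather than an error:
```sql
SELECT
    map('k1', 10, 'k2', 20) AS a,
    a['k1'] AS hit,      -- 10
    a['absent'] AS miss; -- 0, the default value of the numeric value type
```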

View File

@@ -1417,31 +1417,31 @@ toStartOfFifteenMinutes(toDateTime('2023-04-21 10:23:00')): 2023-04-21 10:15:00
This function generalizes other `toStartOf*()` functions with `toStartOfInterval(date_or_date_with_time, INTERVAL x unit [, time_zone])` syntax.
For example,
- `toStartOfInterval(t, INTERVAL 1 YEAR)` returns the same as `toStartOfYear(t)`,
- `toStartOfInterval(t, INTERVAL 1 MONTH)` returns the same as `toStartOfMonth(t)`,
- `toStartOfInterval(t, INTERVAL 1 DAY)` returns the same as `toStartOfDay(t)`,
- `toStartOfInterval(t, INTERVAL 15 MINUTE)` returns the same as `toStartOfFifteenMinutes(t)`.
The calculation is performed relative to specific points in time:
| Interval | Start |
|-------------|------------------------|
| YEAR | year 0 |
| QUARTER | 1900 Q1 |
| MONTH | 1900 January |
| WEEK | 1970, 1st week (01-05) |
| DAY | 1970-01-01 |
| HOUR | (*) |
| MINUTE | 1970-01-01 00:00:00 |
| SECOND | 1970-01-01 00:00:00 |
| MILLISECOND | 1970-01-01 00:00:00 |
| MICROSECOND | 1970-01-01 00:00:00 |
| NANOSECOND | 1970-01-01 00:00:00 |
(*) hour intervals are special: the calculation is always performed relative to 00:00:00 (midnight) of the current day. As a result, only
hour values between 1 and 23 are useful.
If unit `WEEK` was specified, `toStartOfInterval` assumes that weeks start on Monday. Note that this behavior is different from that of function `toStartOfWeek` in which weeks start by default on Sunday.
**See Also**
@@ -2588,16 +2588,16 @@ Subtracts a specified number of years from a date, a date with time or a string-
**Syntax**
```sql
subtractYears(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of years from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of years to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` years. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2625,16 +2625,16 @@ Subtracts a specified number of quarters from a date, a date with time or a stri
**Syntax**
```sql
subtractQuarters(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of quarters from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of quarters to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` quarters. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2662,16 +2662,16 @@ Subtracts a specified number of months from a date, a date with time or a string
**Syntax**
```sql
subtractMonths(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of months from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of months to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` months. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2699,16 +2699,16 @@ Subtracts a specified number of weeks from a date, a date with time or a string-
**Syntax**
```sql
subtractWeeks(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of weeks from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of weeks to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` weeks. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2736,16 +2736,16 @@ Subtracts a specified number of days from a date, a date with time or a string-e
**Syntax**
```sql
subtractDays(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of days from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of days to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` days. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2773,16 +2773,16 @@ Subtracts a specified number of hours from a date, a date with time or a string-
**Syntax**
```sql
subtractHours(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of hours from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of hours to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` hours. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2810,16 +2810,16 @@ Subtracts a specified number of minutes from a date, a date with time or a strin
**Syntax**
```sql
subtractMinutes(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of minutes from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of minutes to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` minutes. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2847,16 +2847,16 @@ Subtracts a specified number of seconds from a date, a date with time or a strin
**Syntax**
```sql
subtractSeconds(date, num)
```
**Parameters**
- `date`: Date / date with time to subtract specified number of seconds from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of seconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date` minus `num` seconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2884,16 +2884,16 @@ Subtracts a specified number of milliseconds from a date with time or a string-e
**Syntax**
```sql
subtractMilliseconds(date_time, num)
```
**Parameters**
- `date_time`: Date with time to subtract specified number of milliseconds from. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of milliseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` milliseconds. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2919,16 +2919,16 @@ Subtracts a specified number of microseconds from a date with time or a string-e
**Syntax**
```sql
subtractMicroseconds(date_time, num)
```
**Parameters**
- `date_time`: Date with time to subtract specified number of microseconds from. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of microseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `num` microseconds. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
@@ -2954,16 +2954,16 @@ Subtracts a specified number of nanoseconds from a date with time or a string-en
**Syntax**
```sql
subtractNanoseconds(date_time, num)
```
**Parameters**
- `date_time`: Date with time to subtract specified number of nanoseconds from. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md), [String](../data-types/string.md).
- `num`: Number of nanoseconds to subtract. [(U)Int*](../data-types/int-uint.md), [Float*](../data-types/float.md).
**Returned value**
- Returns `date_time` minus `x` nanoseconds. [datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
- Returns `date_time` minus `num` nanoseconds. [DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
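And a sketch at nanosecond precision (1000 nanoseconds = 1 microsecond):

```sql
SELECT subtractNanoseconds(toDateTime64('2024-01-01 00:00:00', 9), 1000) AS result;
-- result: 2023-12-31 23:59:59.999999000
```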
@ -3040,11 +3040,11 @@ subtractTupleOfIntervals(interval_1, interval_2)
**Parameters**
- `date`: First interval or interval of tuples. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
- `date`: Date or date with time to subtract `intervals` from. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
- `intervals`: Tuple of intervals to subtract from `date`. [tuple](../data-types/tuple.md)([interval](../data-types/special-data-types/interval.md)).
**Returned value**
- Returns `date` with subtracted `intervals`. [date](../data-types/date.md)/[date32](../data-types/date32.md)/[datetime](../data-types/datetime.md)/[datetime64](../data-types/datetime64.md).
- Returns `date` with subtracted `intervals`. [Date](../data-types/date.md)/[Date32](../data-types/date32.md)/[DateTime](../data-types/datetime.md)/[DateTime64](../data-types/datetime64.md).
**Example**
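A sketch combining two intervals (illustrative dates):

```sql
WITH toDate('2018-01-01') AS date
SELECT subtractTupleOfIntervals(date, (INTERVAL 1 DAY, INTERVAL 1 YEAR)) AS result;
-- result: 2016-12-31
```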

View File

@ -792,6 +792,39 @@ Result:
│ -1 │
└──────────┘
```
## sigmoid
Returns the [sigmoid function](https://en.wikipedia.org/wiki/Sigmoid_function).
**Syntax**
```sql
sigmoid(x)
```
**Parameters**
- `x` — input value. Values from the interval: `-∞ < x < +∞`. [(U)Int*](../../sql-reference/data-types/int-uint.md), [Float*](../../sql-reference/data-types/float.md) or [Decimal*](../../sql-reference/data-types/decimal.md).
**Returned value**
- Corresponding value along the sigmoid curve between 0 and 1. [Float64](../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
SELECT round(sigmoid(x), 5) FROM (SELECT arrayJoin([-1, 0, 1]) AS x);
```
Result:
```result
0.26894
0.5
0.73106
```
## degrees
@ -914,37 +947,3 @@ Result:
│ 11 │
└──────────────────────────────────┘
```

View File

@ -16,7 +16,7 @@ map(key1, value1[, key2, value2, ...])
**Arguments**
- `key` — The key part of the pair. [String](../../sql-reference/data-types/string.md), [Integer](../../sql-reference/data-types/int-uint.md), [LowCardinality](../../sql-reference/data-types/lowcardinality.md), [FixedString](../../sql-reference/data-types/fixedstring.md), [UUID](../../sql-reference/data-types/uuid.md), [Date](../../sql-reference/data-types/date.md), [DateTime](../../sql-reference/data-types/datetime.md), [Date32](../../sql-reference/data-types/date32.md), [Enum](../../sql-reference/data-types/enum.md).
- `key` — The key part of the pair. Arbitrary type, except [Nullable](../../sql-reference/data-types/nullable.md) and [LowCardinality](../../sql-reference/data-types/lowcardinality.md) nested with [Nullable](../../sql-reference/data-types/nullable.md).
- `value` — The value part of the pair. Arbitrary type, including [Map](../../sql-reference/data-types/map.md) and [Array](../../sql-reference/data-types/array.md).
**Returned value**
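For context, a minimal usage sketch of `map` (column values are illustrative):

```sql
SELECT map('key1', number, 'key2', number * 2) AS m FROM numbers(3);
-- {'key1':0,'key2':0}
-- {'key1':1,'key2':2}
-- {'key1':2,'key2':4}
```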

View File

@ -5,21 +5,21 @@ sidebar_label: WITH
# WITH Clause
ClickHouse supports Common Table Expressions ([CTE](https://en.wikipedia.org/wiki/Hierarchical_and_recursive_queries_in_SQL)) and substitutes the code defined in the `WITH` clause in all places of use for the rest of the `SELECT` query. Named subqueries can be included in the current and child query contexts in places where table objects are allowed. Recursion is prevented by hiding the current-level CTEs from the `WITH` expression.
Please note that CTEs do not guarantee the same results in all places they are called, because the query is re-executed for each use. An example of such behavior is shown below:
``` sql
with cte_numbers as
(
    select
        num
    from generateRandom('num UInt64', NULL)
    limit 1000000
)
select
    count()
from cte_numbers
where num in (select num from cte_numbers)
```
@ -87,3 +87,226 @@ LIMIT 10;
WITH test1 AS (SELECT i + 1, j + 1 FROM test1)
SELECT * FROM test1;
```
## Recursive Queries
The optional `RECURSIVE` modifier allows a `WITH` query to refer to its own output.
**Example:** Sum integers from 1 through 100
```sql
WITH RECURSIVE test_table AS (
    SELECT 1 AS number
UNION ALL
    SELECT number + 1 FROM test_table WHERE number < 100
)
SELECT sum(number) FROM test_table;
```
``` text
┌─sum(number)─┐
│        5050 │
└─────────────┘
```
The general form of a recursive `WITH` query is always a non-recursive term, then `UNION ALL`, then a recursive term, where only the recursive term can contain a reference to the query's own output. A recursive CTE query is executed as follows:
1. Evaluate the non-recursive term. Place the result of the non-recursive term query in a temporary working table.
2. As long as the working table is not empty, repeat these steps:
    1. Evaluate the recursive term, substituting the current contents of the working table for the recursive self-reference. Place the result of the recursive term query in a temporary intermediate table.
    2. Replace the contents of the working table with the contents of the intermediate table, then empty the intermediate table.
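A minimal sketch of these steps, with the working table traced in comments (the bound of 3 is illustrative):

```sql
WITH RECURSIVE t AS (
    SELECT 1 AS number          -- non-recursive term: working table = {1}
    UNION ALL
    SELECT number + 1 FROM t    -- recursive term: produces {2}, then {3},
    WHERE number < 3            -- then an empty set, which stops the loop
)
SELECT * FROM t;                -- result: the union of all iterations, i.e. 1, 2, 3
```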
Recursive queries are typically used to work with hierarchical or tree-structured data. For example, we can write a query that performs tree traversal:
**Example:** Tree traversal
First, let's create a tree table:
```sql
DROP TABLE IF EXISTS tree;
CREATE TABLE tree
(
    id UInt64,
    parent_id Nullable(UInt64),
    data String
) ENGINE = MergeTree ORDER BY id;
INSERT INTO tree VALUES (0, NULL, 'ROOT'), (1, 0, 'Child_1'), (2, 0, 'Child_2'), (3, 1, 'Child_1_1');
```
We can traverse that tree with the following query:
**Example:** Tree traversal
```sql
WITH RECURSIVE search_tree AS (
    SELECT id, parent_id, data
    FROM tree t
    WHERE t.id = 0
UNION ALL
    SELECT t.id, t.parent_id, t.data
    FROM tree t, search_tree st
    WHERE t.parent_id = st.id
)
SELECT * FROM search_tree;
```
```text
┌─id─┬─parent_id─┬─data──────┐
│  0 │      ᴺᵁᴸᴸ │ ROOT      │
│  1 │         0 │ Child_1   │
│  2 │         0 │ Child_2   │
│  3 │         1 │ Child_1_1 │
└────┴───────────┴───────────┘
```
### Search order
To create a depth-first order, we compute for each result row an array of rows that we have already visited:
**Example:** Tree traversal depth-first order
```sql
WITH RECURSIVE search_tree AS (
    SELECT id, parent_id, data, [t.id] AS path
    FROM tree t
    WHERE t.id = 0
UNION ALL
    SELECT t.id, t.parent_id, t.data, arrayConcat(path, [t.id])
    FROM tree t, search_tree st
    WHERE t.parent_id = st.id
)
SELECT * FROM search_tree ORDER BY path;
```
```text
┌─id─┬─parent_id─┬─data──────┬─path────┐
│  0 │      ᴺᵁᴸᴸ │ ROOT      │ [0]     │
│  1 │         0 │ Child_1   │ [0,1]   │
│  3 │         1 │ Child_1_1 │ [0,1,3] │
│  2 │         0 │ Child_2   │ [0,2]   │
└────┴───────────┴───────────┴─────────┘
```
To create a breadth-first order, the standard approach is to add a column that tracks the depth of the search:
**Example:** Tree traversal breadth-first order
```sql
WITH RECURSIVE search_tree AS (
    SELECT id, parent_id, data, [t.id] AS path, toUInt64(0) AS depth
    FROM tree t
    WHERE t.id = 0
UNION ALL
    SELECT t.id, t.parent_id, t.data, arrayConcat(path, [t.id]), depth + 1
    FROM tree t, search_tree st
    WHERE t.parent_id = st.id
)
SELECT * FROM search_tree ORDER BY depth;
```
```text
┌─id─┬─parent_id─┬─data──────┬─path────┬─depth─┐
│  0 │      ᴺᵁᴸᴸ │ ROOT      │ [0]     │     0 │
│  1 │         0 │ Child_1   │ [0,1]   │     1 │
│  2 │         0 │ Child_2   │ [0,2]   │     1 │
│  3 │         1 │ Child_1_1 │ [0,1,3] │     2 │
└────┴───────────┴───────────┴─────────┴───────┘
```
### Cycle detection
First, let's create a graph table:
```sql
DROP TABLE IF EXISTS graph;
CREATE TABLE graph
(
    from UInt64,
    to UInt64,
    label String
) ENGINE = MergeTree ORDER BY (from, to);
INSERT INTO graph VALUES (1, 2, '1 -> 2'), (1, 3, '1 -> 3'), (2, 3, '2 -> 3'), (1, 4, '1 -> 4'), (4, 5, '4 -> 5');
```
We can traverse that graph with the following query:
**Example:** Graph traversal without cycle detection
```sql
WITH RECURSIVE search_graph AS (
    SELECT from, to, label FROM graph g
UNION ALL
    SELECT g.from, g.to, g.label
    FROM graph g, search_graph sg
    WHERE g.from = sg.to
)
SELECT DISTINCT * FROM search_graph ORDER BY from;
```
```text
┌─from─┬─to─┬─label──┐
│    1 │  4 │ 1 -> 4 │
│    1 │  2 │ 1 -> 2 │
│    1 │  3 │ 1 -> 3 │
│    2 │  3 │ 2 -> 3 │
│    4 │  5 │ 4 -> 5 │
└──────┴────┴────────┘
```
But if we add a cycle to that graph, the previous query will fail with a `Maximum recursive CTE evaluation depth` error:
```sql
INSERT INTO graph VALUES (5, 1, '5 -> 1');
WITH RECURSIVE search_graph AS (
    SELECT from, to, label FROM graph g
UNION ALL
    SELECT g.from, g.to, g.label
    FROM graph g, search_graph sg
    WHERE g.from = sg.to
)
SELECT DISTINCT * FROM search_graph ORDER BY from;
```
```text
Code: 306. DB::Exception: Received from localhost:9000. DB::Exception: Maximum recursive CTE evaluation depth (1000) exceeded, during evaluation of search_graph AS (SELECT from, to, label FROM graph AS g UNION ALL SELECT g.from, g.to, g.label FROM graph AS g, search_graph AS sg WHERE g.from = sg.to). Consider raising max_recursive_cte_evaluation_depth setting.: While executing RecursiveCTESource. (TOO_DEEP_RECURSION)
```
The standard method for handling cycles is to compute an array of the already visited nodes:
**Example:** Graph traversal with cycle detection
```sql
WITH RECURSIVE search_graph AS (
SELECT from, to, label, false AS is_cycle, [tuple(g.from, g.to)] AS path FROM graph g
UNION ALL
SELECT g.from, g.to, g.label, has(path, tuple(g.from, g.to)), arrayConcat(sg.path, [tuple(g.from, g.to)])
FROM graph g, search_graph sg
WHERE g.from = sg.to AND NOT is_cycle
)
SELECT * FROM search_graph WHERE is_cycle ORDER BY from;
```
```text
┌─from─┬─to─┬─label──┬─is_cycle─┬─path──────────────────────┐
│    1 │  4 │ 1 -> 4 │     true │ [(1,4),(4,5),(5,1),(1,4)] │
│    4 │  5 │ 4 -> 5 │     true │ [(4,5),(5,1),(1,4),(4,5)] │
│    5 │  1 │ 5 -> 1 │     true │ [(5,1),(1,4),(4,5),(5,1)] │
└──────┴────┴────────┴──────────┴───────────────────────────┘
```
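To keep the traversal rows instead, one can invert the final filter; a sketch based on the same query:

```sql
WITH RECURSIVE search_graph AS (
    SELECT from, to, label, false AS is_cycle, [tuple(g.from, g.to)] AS path FROM graph g
UNION ALL
    SELECT g.from, g.to, g.label, has(path, tuple(g.from, g.to)), arrayConcat(sg.path, [tuple(g.from, g.to)])
    FROM graph g, search_graph sg
    WHERE g.from = sg.to AND NOT is_cycle
)
SELECT DISTINCT from, to, label FROM search_graph WHERE NOT is_cycle ORDER BY from;
```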
### Infinite queries
It is also possible to use infinite recursive CTE queries if `LIMIT` is used in the outer query:
**Example:** Infinite recursive CTE query
```sql
WITH RECURSIVE test_table AS (
    SELECT 1 AS number
UNION ALL
    SELECT number + 1 FROM test_table
)
SELECT sum(number) FROM (SELECT number FROM test_table LIMIT 100);
```
```text
┌─sum(number)─┐
│        5050 │
└─────────────┘
```

View File

@ -25,7 +25,7 @@ If the `alter_sync` is set to `2` and some replicas are not active for more than
## TRUNCATE ALL TABLES
``` sql
TRUNCATE ALL TABLES [IF EXISTS] db [ON CLUSTER cluster]
TRUNCATE ALL TABLES FROM [IF EXISTS] db [ON CLUSTER cluster]
```
Removes all data from all tables in a database.
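A usage sketch, assuming a database named `db1`:

```sql
TRUNCATE ALL TABLES FROM IF EXISTS db1;  -- empties every table in db1; the tables themselves remain
```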

View File

@ -570,11 +570,8 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
if (params.full_access)
return true;
auto access_granted = [&]
auto access_granted = []
{
if (trace_log)
LOG_TRACE(trace_log, "Access granted: {}{}", (AccessRightsElement{flags, args...}.toStringWithoutOptions()),
(grant_option ? " WITH GRANT OPTION" : ""));
return true;
};
@ -582,9 +579,6 @@ bool ContextAccess::checkAccessImplHelper(AccessFlags flags, const Args &... arg
FormatStringHelper<String, FmtArgs...> fmt_string [[maybe_unused]],
FmtArgs && ...fmt_args [[maybe_unused]])
{
if (trace_log)
LOG_TRACE(trace_log, "Access denied: {}{}", (AccessRightsElement{flags, args...}.toStringWithoutOptions()),
(grant_option ? " WITH GRANT OPTION" : ""));
if constexpr (throw_if_denied)
throw Exception(error_code, std::move(fmt_string), getUserName(), std::forward<FmtArgs>(fmt_args)...);
return false;

View File

@ -16,7 +16,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int AGGREGATE_FUNCTION_THROW;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
namespace
@ -116,7 +116,7 @@ void registerAggregateFunctionAggThrow(AggregateFunctionFactory & factory)
if (parameters.size() == 1)
throw_probability = parameters[0].safeGet<Float64>();
else if (parameters.size() > 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} cannot have more than one parameter", name);
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} cannot have more than one parameter", name);
return std::make_shared<AggregateFunctionThrow>(argument_types, parameters, throw_probability);
});

View File

@ -27,6 +27,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
extern const int TOO_LARGE_ARRAY_SIZE;
extern const int CANNOT_CONVERT_TYPE;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
@ -74,7 +75,7 @@ public:
if (!params.empty())
{
if (params.size() > 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at most two parameters.", getName());
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at most two parameters.", getName());
default_value = params[0];

View File

@ -22,7 +22,8 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
namespace
@ -34,12 +35,12 @@ namespace
const std::string & name, const DataTypes & argument_types, const Array & parameters, const Settings *)
{
if (parameters.size() > 4)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Aggregate function {} requires at most four parameters: "
"learning_rate, l2_regularization_coef, mini-batch size and weights_updater method", name);
if (argument_types.size() < 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Aggregate function {} requires at least two arguments: target and model's parameters", name);
for (size_t i = 0; i < argument_types.size(); ++i)

View File

@ -17,19 +17,19 @@
#include <boost/math/distributions/normal.hpp>
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int BAD_ARGUMENTS;
}
namespace DB
{
struct Settings;
namespace ErrorCodes
{
extern const int NOT_IMPLEMENTED;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
extern const int BAD_ARGUMENTS;
}
namespace
{
@ -141,7 +141,7 @@ public:
: IAggregateFunctionDataHelper<MannWhitneyData, AggregateFunctionMannWhitney> ({arguments}, {}, createResultType())
{
if (params.size() > 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} require two parameter or less", getName());
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} require two parameter or less", getName());
if (params.empty())
{

View File

@ -14,7 +14,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NOT_IMPLEMENTED;
}
@ -118,7 +118,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
{
if (argument_types.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one argument", name);
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at least one argument", name);
const DataTypePtr & argument_type = argument_types[0];
WhichDataType which(argument_type);

View File

@ -12,7 +12,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -27,7 +27,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
{
if (argument_types.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one argument", name);
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at least one argument", name);
const DataTypePtr & argument_type = argument_types[0];
WhichDataType which(argument_type);

View File

@ -13,7 +13,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -29,7 +29,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
{
if (argument_types.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one argument", name);
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at least one argument", name);
const DataTypePtr & argument_type = argument_types[0];
WhichDataType which(argument_type);

View File

@ -13,7 +13,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -29,7 +29,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
{
if (argument_types.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one argument", name);
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at least one argument", name);
const DataTypePtr & argument_type = argument_types[0];
WhichDataType which(argument_type);

View File

@ -13,7 +13,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -28,7 +28,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
{
if (argument_types.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one argument", name);
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at least one argument", name);
const DataTypePtr & argument_type = argument_types[0];
WhichDataType which(argument_type);

View File

@ -13,7 +13,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -28,7 +28,7 @@ AggregateFunctionPtr createAggregateFunctionQuantile(
const std::string & name, const DataTypes & argument_types, const Array & params, const Settings *)
{
if (argument_types.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires at least one argument", name);
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires at least one argument", name);
const DataTypePtr & argument_type = argument_types[0];
WhichDataType which(argument_type);

View File

@ -253,9 +253,9 @@ private:
else
{
Y scaled;
bool has_overfllow = common::mulOverflow<Y>(y, levels_num, scaled);
bool has_overflow = common::mulOverflow<Y>(y, levels_num, scaled);
if (has_overfllow)
if (has_overflow)
y = y / (y_max / levels_num) + 1;
else
y = scaled / y_max + 1;

View File

@ -35,7 +35,7 @@ namespace ErrorCodes
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
@ -467,7 +467,7 @@ AggregateFunctionPtr createAggregateFunctionTopK(const std::string & name, const
if (!params.empty())
{
if (params.size() > 3)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Aggregate function '{}' requires three parameters or less", name);
threshold = applyVisitor(FieldVisitorConvertToNumber<UInt64>(), params[0]);

View File

@ -4,16 +4,16 @@
#include <AggregateFunctions/Moments.h>
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
}
namespace DB
{
struct Settings;
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
namespace
{
@ -80,7 +80,7 @@ AggregateFunctionPtr createAggregateFunctionWelchTTest(
assertBinary(name, argument_types);
if (parameters.size() > 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} requires zero or one parameter.", name);
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} requires zero or one parameter.", name);
if (!isNumber(argument_types[0]) || !isNumber(argument_types[1]))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Aggregate function {} only supports numerical types", name);

View File

@ -9,7 +9,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -26,7 +26,7 @@ public:
DataTypes transformArguments(const DataTypes & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "-Array aggregate functions require at least one argument");
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Array aggregate functions require at least one argument");
DataTypes nested_arguments;
for (const auto & type : arguments)

View File

@ -18,7 +18,7 @@ struct Settings;
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -42,7 +42,7 @@ public:
, nested_func(nested), num_arguments(types.size())
{
if (num_arguments == 0)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Aggregate function {} require at least one argument", getName());
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Aggregate function {} require at least one argument", getName());
only_null_condition = types.back()->onlyNull();

View File

@ -266,6 +266,9 @@ void validateAggregates(const QueryTreeNodePtr & query_node, AggregatesValidatio
if (query_node_typed.hasHaving())
validate_group_by_columns_visitor.visit(query_node_typed.getHaving());
if (query_node_typed.hasQualify())
validate_group_by_columns_visitor.visit(query_node_typed.getQualify());
if (query_node_typed.hasOrderBy())
validate_group_by_columns_visitor.visit(query_node_typed.getOrderByNode());

View File

@ -33,42 +33,42 @@ Strings BackupCoordinationLocal::waitForStage(const String &, std::chrono::milli
return {};
}
void BackupCoordinationLocal::addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
void BackupCoordinationLocal::addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, const std::vector<PartNameAndChecksum> & part_names_and_checksums)
{
std::lock_guard lock{replicated_tables_mutex};
replicated_tables.addPartNames({table_shared_id, table_name_for_logs, replica_name, part_names_and_checksums});
replicated_tables.addPartNames({table_zk_path, table_name_for_logs, replica_name, part_names_and_checksums});
}
Strings BackupCoordinationLocal::getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const
Strings BackupCoordinationLocal::getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const
{
std::lock_guard lock{replicated_tables_mutex};
return replicated_tables.getPartNames(table_shared_id, replica_name);
return replicated_tables.getPartNames(table_zk_path, replica_name);
}
void BackupCoordinationLocal::addReplicatedMutations(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name, const std::vector<MutationInfo> & mutations)
void BackupCoordinationLocal::addReplicatedMutations(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, const std::vector<MutationInfo> & mutations)
{
std::lock_guard lock{replicated_tables_mutex};
replicated_tables.addMutations({table_shared_id, table_name_for_logs, replica_name, mutations});
replicated_tables.addMutations({table_zk_path, table_name_for_logs, replica_name, mutations});
}
std::vector<IBackupCoordination::MutationInfo> BackupCoordinationLocal::getReplicatedMutations(const String & table_shared_id, const String & replica_name) const
std::vector<IBackupCoordination::MutationInfo> BackupCoordinationLocal::getReplicatedMutations(const String & table_zk_path, const String & replica_name) const
{
std::lock_guard lock{replicated_tables_mutex};
return replicated_tables.getMutations(table_shared_id, replica_name);
return replicated_tables.getMutations(table_zk_path, replica_name);
}
void BackupCoordinationLocal::addReplicatedDataPath(const String & table_shared_id, const String & data_path)
void BackupCoordinationLocal::addReplicatedDataPath(const String & table_zk_path, const String & data_path)
{
std::lock_guard lock{replicated_tables_mutex};
replicated_tables.addDataPath({table_shared_id, data_path});
replicated_tables.addDataPath({table_zk_path, data_path});
}
Strings BackupCoordinationLocal::getReplicatedDataPaths(const String & table_shared_id) const
Strings BackupCoordinationLocal::getReplicatedDataPaths(const String & table_zk_path) const
{
std::lock_guard lock{replicated_tables_mutex};
return replicated_tables.getDataPaths(table_shared_id);
return replicated_tables.getDataPaths(table_zk_path);
}

View File

@ -29,16 +29,16 @@ public:
Strings waitForStage(const String & stage_to_wait) override;
Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) override;
void addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name,
void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
Strings getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const override;
Strings getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const override;
void addReplicatedMutations(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name,
void addReplicatedMutations(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
const std::vector<MutationInfo> & mutations) override;
std::vector<MutationInfo> getReplicatedMutations(const String & table_shared_id, const String & replica_name) const override;
std::vector<MutationInfo> getReplicatedMutations(const String & table_zk_path, const String & replica_name) const override;
void addReplicatedDataPath(const String & table_shared_id, const String & data_path) override;
Strings getReplicatedDataPaths(const String & table_shared_id) const override;
void addReplicatedDataPath(const String & table_zk_path, const String & data_path) override;
Strings getReplicatedDataPaths(const String & table_zk_path) const override;
void addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path) override;
Strings getReplicatedAccessFilePaths(const String & access_zk_path, AccessEntityType access_entity_type) const override;

View File

@ -358,7 +358,7 @@ String BackupCoordinationRemote::deserializeFromMultipleZooKeeperNodes(const Str
void BackupCoordinationRemote::addReplicatedPartNames(
const String & table_shared_id,
const String & table_zk_path,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums)
@ -374,22 +374,22 @@ void BackupCoordinationRemote::addReplicatedPartNames(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_part_names/" + escapeForFileName(table_shared_id);
String path = zookeeper_path + "/repl_part_names/" + escapeForFileName(table_zk_path);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(replica_name);
zk->createIfNotExists(path, ReplicatedPartNames::serialize(part_names_and_checksums, table_name_for_logs));
});
}
Strings BackupCoordinationRemote::getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const
Strings BackupCoordinationRemote::getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const
{
std::lock_guard lock{replicated_tables_mutex};
prepareReplicatedTables();
return replicated_tables->getPartNames(table_shared_id, replica_name);
return replicated_tables->getPartNames(table_zk_path, replica_name);
}
void BackupCoordinationRemote::addReplicatedMutations(
const String & table_shared_id,
const String & table_zk_path,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<MutationInfo> & mutations)
@ -405,23 +405,23 @@ void BackupCoordinationRemote::addReplicatedMutations(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_mutations/" + escapeForFileName(table_shared_id);
String path = zookeeper_path + "/repl_mutations/" + escapeForFileName(table_zk_path);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(replica_name);
zk->createIfNotExists(path, ReplicatedMutations::serialize(mutations, table_name_for_logs));
});
}
std::vector<IBackupCoordination::MutationInfo> BackupCoordinationRemote::getReplicatedMutations(const String & table_shared_id, const String & replica_name) const
std::vector<IBackupCoordination::MutationInfo> BackupCoordinationRemote::getReplicatedMutations(const String & table_zk_path, const String & replica_name) const
{
std::lock_guard lock{replicated_tables_mutex};
prepareReplicatedTables();
return replicated_tables->getMutations(table_shared_id, replica_name);
return replicated_tables->getMutations(table_zk_path, replica_name);
}
void BackupCoordinationRemote::addReplicatedDataPath(
const String & table_shared_id, const String & data_path)
const String & table_zk_path, const String & data_path)
{
{
std::lock_guard lock{replicated_tables_mutex};
@ -434,18 +434,18 @@ void BackupCoordinationRemote::addReplicatedDataPath(
[&, &zk = holder.faulty_zookeeper]()
{
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_shared_id);
String path = zookeeper_path + "/repl_data_paths/" + escapeForFileName(table_zk_path);
zk->createIfNotExists(path, "");
path += "/" + escapeForFileName(data_path);
zk->createIfNotExists(path, "");
});
}
Strings BackupCoordinationRemote::getReplicatedDataPaths(const String & table_shared_id) const
Strings BackupCoordinationRemote::getReplicatedDataPaths(const String & table_zk_path) const
{
std::lock_guard lock{replicated_tables_mutex};
prepareReplicatedTables();
return replicated_tables->getDataPaths(table_shared_id);
return replicated_tables->getDataPaths(table_zk_path);
}
@ -464,16 +464,16 @@ void BackupCoordinationRemote::prepareReplicatedTables() const
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_part_names";
for (const String & escaped_table_shared_id : zk->getChildren(path))
for (const String & escaped_table_zk_path : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
String path2 = path + "/" + escaped_table_zk_path;
for (const String & escaped_replica_name : zk->getChildren(path2))
{
String replica_name = unescapeForFileName(escaped_replica_name);
auto part_names = ReplicatedPartNames::deserialize(zk->get(path2 + "/" + escaped_replica_name));
part_names_for_replicated_tables.push_back(
{table_shared_id, part_names.table_name_for_logs, replica_name, part_names.part_names_and_checksums});
{table_zk_path, part_names.table_name_for_logs, replica_name, part_names.part_names_and_checksums});
}
}
});
@ -489,16 +489,16 @@ void BackupCoordinationRemote::prepareReplicatedTables() const
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_mutations";
for (const String & escaped_table_shared_id : zk->getChildren(path))
for (const String & escaped_table_zk_path : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
String path2 = path + "/" + escaped_table_zk_path;
for (const String & escaped_replica_name : zk->getChildren(path2))
{
String replica_name = unescapeForFileName(escaped_replica_name);
auto mutations = ReplicatedMutations::deserialize(zk->get(path2 + "/" + escaped_replica_name));
mutations_for_replicated_tables.push_back(
{table_shared_id, mutations.table_name_for_logs, replica_name, mutations.mutations});
{table_zk_path, mutations.table_name_for_logs, replica_name, mutations.mutations});
}
}
});
@ -514,14 +514,14 @@ void BackupCoordinationRemote::prepareReplicatedTables() const
with_retries.renewZooKeeper(zk);
String path = zookeeper_path + "/repl_data_paths";
for (const String & escaped_table_shared_id : zk->getChildren(path))
for (const String & escaped_table_zk_path : zk->getChildren(path))
{
String table_shared_id = unescapeForFileName(escaped_table_shared_id);
String path2 = path + "/" + escaped_table_shared_id;
String table_zk_path = unescapeForFileName(escaped_table_zk_path);
String path2 = path + "/" + escaped_table_zk_path;
for (const String & escaped_data_path : zk->getChildren(path2))
{
String data_path = unescapeForFileName(escaped_data_path);
data_paths_for_replicated_tables.push_back({table_shared_id, data_path});
data_paths_for_replicated_tables.push_back({table_zk_path, data_path});
}
}
});

View File

@ -41,23 +41,23 @@ public:
Strings waitForStage(const String & stage_to_wait, std::chrono::milliseconds timeout) override;
void addReplicatedPartNames(
const String & table_shared_id,
const String & table_zk_path,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) override;
Strings getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const override;
Strings getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const override;
void addReplicatedMutations(
const String & table_shared_id,
const String & table_zk_path,
const String & table_name_for_logs,
const String & replica_name,
const std::vector<MutationInfo> & mutations) override;
std::vector<MutationInfo> getReplicatedMutations(const String & table_shared_id, const String & replica_name) const override;
std::vector<MutationInfo> getReplicatedMutations(const String & table_zk_path, const String & replica_name) const override;
void addReplicatedDataPath(const String & table_shared_id, const String & data_path) override;
Strings getReplicatedDataPaths(const String & table_shared_id) const override;
void addReplicatedDataPath(const String & table_zk_path, const String & data_path) override;
Strings getReplicatedDataPaths(const String & table_zk_path) const override;
void addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path) override;
Strings getReplicatedAccessFilePaths(const String & access_zk_path, AccessEntityType access_entity_type) const override;

View File

@ -151,7 +151,7 @@ BackupCoordinationReplicatedTables::~BackupCoordinationReplicatedTables() = defa
void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica && part_names)
{
const auto & table_shared_id = part_names.table_shared_id;
const auto & table_zk_path = part_names.table_zk_path;
const auto & table_name_for_logs = part_names.table_name_for_logs;
const auto & replica_name = part_names.replica_name;
const auto & part_names_and_checksums = part_names.part_names_and_checksums;
@ -159,7 +159,7 @@ void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica &
if (prepared)
throw Exception(ErrorCodes::LOGICAL_ERROR, "addPartNames() must not be called after preparing");
auto & table_info = table_infos[table_shared_id];
auto & table_info = table_infos[table_zk_path];
table_info.table_name_for_logs = table_name_for_logs;
if (!table_info.covered_parts_finder)
@ -200,11 +200,11 @@ void BackupCoordinationReplicatedTables::addPartNames(PartNamesForTableReplica &
}
}
Strings BackupCoordinationReplicatedTables::getPartNames(const String & table_shared_id, const String & replica_name) const
Strings BackupCoordinationReplicatedTables::getPartNames(const String & table_zk_path, const String & replica_name) const
{
prepare();
auto it = table_infos.find(table_shared_id);
auto it = table_infos.find(table_zk_path);
if (it == table_infos.end())
return {};
@ -218,7 +218,7 @@ Strings BackupCoordinationReplicatedTables::getPartNames(const String & table_sh
void BackupCoordinationReplicatedTables::addMutations(MutationsForTableReplica && mutations_for_table_replica)
{
const auto & table_shared_id = mutations_for_table_replica.table_shared_id;
const auto & table_zk_path = mutations_for_table_replica.table_zk_path;
const auto & table_name_for_logs = mutations_for_table_replica.table_name_for_logs;
const auto & replica_name = mutations_for_table_replica.replica_name;
const auto & mutations = mutations_for_table_replica.mutations;
@ -226,7 +226,7 @@ void BackupCoordinationReplicatedTables::addMutations(MutationsForTableReplica &
if (prepared)
throw Exception(ErrorCodes::LOGICAL_ERROR, "addMutations() must not be called after preparing");
auto & table_info = table_infos[table_shared_id];
auto & table_info = table_infos[table_zk_path];
table_info.table_name_for_logs = table_name_for_logs;
for (const auto & [mutation_id, mutation_entry] : mutations)
table_info.mutations.emplace(mutation_id, mutation_entry);
@ -236,11 +236,11 @@ void BackupCoordinationReplicatedTables::addMutations(MutationsForTableReplica &
}
std::vector<MutationInfo>
BackupCoordinationReplicatedTables::getMutations(const String & table_shared_id, const String & replica_name) const
BackupCoordinationReplicatedTables::getMutations(const String & table_zk_path, const String & replica_name) const
{
prepare();
auto it = table_infos.find(table_shared_id);
auto it = table_infos.find(table_zk_path);
if (it == table_infos.end())
return {};
@ -257,16 +257,16 @@ BackupCoordinationReplicatedTables::getMutations(const String & table_shared_id,
void BackupCoordinationReplicatedTables::addDataPath(DataPathForTableReplica && data_path_for_table_replica)
{
const auto & table_shared_id = data_path_for_table_replica.table_shared_id;
const auto & table_zk_path = data_path_for_table_replica.table_zk_path;
const auto & data_path = data_path_for_table_replica.data_path;
auto & table_info = table_infos[table_shared_id];
auto & table_info = table_infos[table_zk_path];
table_info.data_paths.emplace(data_path);
}
Strings BackupCoordinationReplicatedTables::getDataPaths(const String & table_shared_id) const
Strings BackupCoordinationReplicatedTables::getDataPaths(const String & table_zk_path) const
{
auto it = table_infos.find(table_shared_id);
auto it = table_infos.find(table_zk_path);
if (it == table_infos.end())
return {};

View File

@ -40,7 +40,7 @@ public:
struct PartNamesForTableReplica
{
String table_shared_id;
String table_zk_path;
String table_name_for_logs;
String replica_name;
std::vector<PartNameAndChecksum> part_names_and_checksums;
@ -55,13 +55,13 @@ public:
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as it was added by call of the function addPartNames() but without duplications and without
/// parts covered by another parts.
Strings getPartNames(const String & table_shared_id, const String & replica_name) const;
Strings getPartNames(const String & table_zk_path, const String & replica_name) const;
using MutationInfo = IBackupCoordination::MutationInfo;
struct MutationsForTableReplica
{
String table_shared_id;
String table_zk_path;
String table_name_for_logs;
String replica_name;
std::vector<MutationInfo> mutations;
@ -71,11 +71,11 @@ public:
void addMutations(MutationsForTableReplica && mutations_for_table_replica);
/// Returns all mutations of a replicated table which are not finished for some data parts added by addReplicatedPartNames().
std::vector<MutationInfo> getMutations(const String & table_shared_id, const String & replica_name) const;
std::vector<MutationInfo> getMutations(const String & table_zk_path, const String & replica_name) const;
struct DataPathForTableReplica
{
String table_shared_id;
String table_zk_path;
String data_path;
};
@ -85,7 +85,7 @@ public:
void addDataPath(DataPathForTableReplica && data_path_for_table_replica);
/// Returns all the data paths in backup added for a replicated table (see also addReplicatedDataPath()).
Strings getDataPaths(const String & table_shared_id) const;
Strings getDataPaths(const String & table_zk_path) const;
private:
void prepare() const;
@ -110,7 +110,7 @@ private:
std::unordered_set<String> data_paths;
};
std::map<String /* table_shared_id */, TableInfo> table_infos; /// Should be ordered because we need this map to be in the same order on every replica.
std::map<String /* table_zk_path */, TableInfo> table_infos; /// Should be ordered because we need this map to be in the same order on every replica.
mutable bool prepared = false;
};

View File

@ -11,6 +11,7 @@
#include <Parsers/ASTCreateQuery.h>
#include <Parsers/formatAST.h>
#include <Storages/IStorage.h>
#include <Storages/MergeTree/extractZooKeeperPathFromReplicatedTableDef.h>
#include <base/chrono_io.h>
#include <base/insertAtEnd.h>
#include <base/scope_guard.h>
@ -758,7 +759,7 @@ void BackupEntriesCollector::makeBackupEntriesForDatabasesDefs()
checkIsQueryCancelled();
ASTPtr new_create_query = database_info.create_database_query;
adjustCreateQueryForBackup(new_create_query, context->getGlobalContext(), nullptr);
adjustCreateQueryForBackup(new_create_query, context->getGlobalContext());
renameDatabaseAndTableNameInCreateQuery(new_create_query, renaming_map, context->getGlobalContext());
const String & metadata_path_in_backup = database_info.metadata_path_in_backup;
@ -775,7 +776,8 @@ void BackupEntriesCollector::makeBackupEntriesForTablesDefs()
checkIsQueryCancelled();
ASTPtr new_create_query = table_info.create_table_query;
adjustCreateQueryForBackup(new_create_query, context->getGlobalContext(), &table_info.replicated_table_shared_id);
table_info.replicated_table_zk_path = extractZooKeeperPathFromReplicatedTableDef(new_create_query->as<const ASTCreateQuery &>(), context);
adjustCreateQueryForBackup(new_create_query, context->getGlobalContext());
renameDatabaseAndTableNameInCreateQuery(new_create_query, renaming_map, context->getGlobalContext());
const String & metadata_path_in_backup = table_info.metadata_path_in_backup;
@ -814,8 +816,8 @@ void BackupEntriesCollector::makeBackupEntriesForTableData(const QualifiedTableN
/// If this table is replicated in this case we call IBackupCoordination::addReplicatedDataPath() which will cause
/// other replicas to fill the storage's data in the backup.
/// If this table is not replicated we'll do nothing leaving the storage's data empty in the backup.
if (table_info.replicated_table_shared_id)
backup_coordination->addReplicatedDataPath(*table_info.replicated_table_shared_id, data_path_in_backup);
if (table_info.replicated_table_zk_path)
backup_coordination->addReplicatedDataPath(*table_info.replicated_table_zk_path, data_path_in_backup);
return;
}

View File

@ -164,7 +164,7 @@ private:
ASTPtr create_table_query;
String metadata_path_in_backup;
std::filesystem::path data_path_in_backup;
std::optional<String> replicated_table_shared_id;
std::optional<String> replicated_table_zk_path;
std::optional<ASTs> partitions;
};

View File

@ -103,7 +103,7 @@ bool compareRestoredTableDef(const IAST & restored_table_create_query, const IAS
auto adjust_before_comparison = [&](const IAST & query) -> ASTPtr
{
auto new_query = query.clone();
adjustCreateQueryForBackup(new_query, global_context, nullptr);
adjustCreateQueryForBackup(new_query, global_context);
ASTCreateQuery & create = typeid_cast<ASTCreateQuery &>(*new_query);
create.setUUID({});
create.if_not_exists = false;

View File

@ -27,9 +27,6 @@ namespace
{
/// Precondition: engine_name.starts_with("Replicated") && engine_name.ends_with("MergeTree")
if (data.replicated_table_shared_id)
*data.replicated_table_shared_id = StorageReplicatedMergeTree::tryGetTableSharedIDFromCreateQuery(*data.create_query, data.global_context);
/// Before storing the metadata in a backup we have to find a zookeeper path in its definition and turn the table's UUID in there
/// back into "{uuid}", and also we probably can remove the zookeeper path and replica name if they're default.
/// So we're kind of reverting what we had done to the table's definition in registerStorageMergeTree.cpp before we created this table.
@ -98,12 +95,9 @@ void DDLAdjustingForBackupVisitor::visit(ASTPtr ast, const Data & data)
visitCreateQuery(*create, data);
}
void adjustCreateQueryForBackup(ASTPtr ast, const ContextPtr & global_context, std::optional<String> * replicated_table_shared_id)
void adjustCreateQueryForBackup(ASTPtr ast, const ContextPtr & global_context)
{
if (replicated_table_shared_id)
*replicated_table_shared_id = {};
DDLAdjustingForBackupVisitor::Data data{ast, global_context, replicated_table_shared_id};
DDLAdjustingForBackupVisitor::Data data{ast, global_context};
DDLAdjustingForBackupVisitor::Visitor{data}.visit(ast);
}

View File

@ -12,9 +12,7 @@ class Context;
using ContextPtr = std::shared_ptr<const Context>;
/// Changes a create query to a form which is appropriate or suitable for saving in a backup.
/// Also extracts a replicated table's shared ID from the create query if this is a create query for a replicated table.
/// `replicated_table_shared_id` can be null if you don't need that.
void adjustCreateQueryForBackup(ASTPtr ast, const ContextPtr & global_context, std::optional<String> * replicated_table_shared_id);
void adjustCreateQueryForBackup(ASTPtr ast, const ContextPtr & global_context);
/// Visits ASTCreateQuery and changes it to a form which is appropriate or suitable for saving in a backup.
class DDLAdjustingForBackupVisitor
@ -24,7 +22,6 @@ public:
{
ASTPtr create_query;
ContextPtr global_context;
std::optional<String> * replicated_table_shared_id = nullptr;
};
using Visitor = InDepthNodeVisitor<DDLAdjustingForBackupVisitor, false>;

View File

@ -36,13 +36,13 @@ public:
/// Multiple replicas of the replicated table call this function and then the added part names can be returned by call of the function
/// getReplicatedPartNames().
/// Checksums are used only to control that parts under the same names on different replicas are the same.
virtual void addReplicatedPartNames(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name,
virtual void addReplicatedPartNames(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name,
const std::vector<PartNameAndChecksum> & part_names_and_checksums) = 0;
/// Returns the names of the parts which a specified replica of a replicated table should put to the backup.
/// This is the same list as it was added by call of the function addReplicatedPartNames() but without duplications and without
/// parts covered by another parts.
virtual Strings getReplicatedPartNames(const String & table_shared_id, const String & replica_name) const = 0;
virtual Strings getReplicatedPartNames(const String & table_zk_path, const String & replica_name) const = 0;
struct MutationInfo
{
@ -51,10 +51,10 @@ public:
};
/// Adds information about mutations of a replicated table.
virtual void addReplicatedMutations(const String & table_shared_id, const String & table_name_for_logs, const String & replica_name, const std::vector<MutationInfo> & mutations) = 0;
virtual void addReplicatedMutations(const String & table_zk_path, const String & table_name_for_logs, const String & replica_name, const std::vector<MutationInfo> & mutations) = 0;
/// Returns all mutations of a replicated table which are not finished for some data parts added by addReplicatedPartNames().
virtual std::vector<MutationInfo> getReplicatedMutations(const String & table_shared_id, const String & replica_name) const = 0;
virtual std::vector<MutationInfo> getReplicatedMutations(const String & table_zk_path, const String & replica_name) const = 0;
/// Adds information about KeeperMap tables
virtual void addKeeperMapTable(const String & table_zookeeper_root_path, const String & table_id, const String & data_path_in_backup) = 0;
@ -65,10 +65,10 @@ public:
/// Adds a data path in backup for a replicated table.
/// Multiple replicas of the replicated table call this function and then all the added paths can be returned by call of the function
/// getReplicatedDataPaths().
virtual void addReplicatedDataPath(const String & table_shared_id, const String & data_path) = 0;
virtual void addReplicatedDataPath(const String & table_zk_path, const String & data_path) = 0;
/// Returns all the data paths in backup added for a replicated table (see also addReplicatedDataPath()).
virtual Strings getReplicatedDataPaths(const String & table_shared_id) const = 0;
virtual Strings getReplicatedDataPaths(const String & table_zk_path) const = 0;
/// Adds a path to access.txt file keeping access entities of a ReplicatedAccessStorage.
virtual void addReplicatedAccessFilePath(const String & access_zk_path, AccessEntityType access_entity_type, const String & file_path) = 0;

View File

@ -2616,7 +2616,7 @@ void ClientBase::runInteractive()
{
// If a separate connection loading suggestions failed to open a new session,
// use the main session to receive them.
suggest->load(*connection, connection_parameters.timeouts, config().getInt("suggestion_limit"));
suggest->load(*connection, connection_parameters.timeouts, config().getInt("suggestion_limit"), global_context->getClientInfo());
}
try

View File

@ -99,7 +99,10 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p
try
{
auto connection = ConnectionType::createConnection(connection_parameters, my_context);
fetch(*connection, connection_parameters.timeouts, getLoadSuggestionQuery(suggestion_limit, std::is_same_v<ConnectionType, LocalConnection>));
fetch(*connection,
connection_parameters.timeouts,
getLoadSuggestionQuery(suggestion_limit, std::is_same_v<ConnectionType, LocalConnection>),
my_context->getClientInfo());
}
catch (const Exception & e)
{
@ -138,11 +141,12 @@ void Suggest::load(ContextPtr context, const ConnectionParameters & connection_p
void Suggest::load(IServerConnection & connection,
const ConnectionTimeouts & timeouts,
Int32 suggestion_limit)
Int32 suggestion_limit,
const ClientInfo & client_info)
{
try
{
fetch(connection, timeouts, getLoadSuggestionQuery(suggestion_limit, true));
fetch(connection, timeouts, getLoadSuggestionQuery(suggestion_limit, true), client_info);
}
catch (...)
{
@ -151,10 +155,10 @@ void Suggest::load(IServerConnection & connection,
}
}
void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query)
void Suggest::fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query, const ClientInfo & client_info)
{
connection.sendQuery(
timeouts, query, {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, nullptr, nullptr, false, {});
timeouts, query, {} /* query_parameters */, "" /* query_id */, QueryProcessingStage::Complete, nullptr, &client_info, false, {});
while (true)
{

View File

@ -31,7 +31,8 @@ public:
void load(IServerConnection & connection,
const ConnectionTimeouts & timeouts,
Int32 suggestion_limit);
Int32 suggestion_limit,
const ClientInfo & client_info);
/// Older server versions cannot execute the query loading suggestions.
static constexpr int MIN_SERVER_REVISION = DBMS_MIN_PROTOCOL_VERSION_WITH_VIEW_IF_PERMITTED;
@ -39,7 +40,7 @@ public:
int getLastError() const { return last_error.load(); }
private:
void fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query);
void fetch(IServerConnection & connection, const ConnectionTimeouts & timeouts, const std::string & query, const ClientInfo & client_info);
void fillWordsFromBlock(const Block & block);

View File

@ -17,19 +17,18 @@ enum Time
template <Time unit>
struct ProfileEventTimeIncrement
{
explicit ProfileEventTimeIncrement<time>(ProfileEvents::Event event_)
explicit ProfileEventTimeIncrement(ProfileEvents::Event event_)
: event(event_), watch(CLOCK_MONOTONIC) {}
template <Time time = unit>
UInt64 elapsed()
{
if constexpr (time == Time::Nanoseconds)
if constexpr (unit == Time::Nanoseconds)
return watch.elapsedNanoseconds();
else if constexpr (time == Time::Microseconds)
else if constexpr (unit == Time::Microseconds)
return watch.elapsedMicroseconds();
else if constexpr (time == Time::Milliseconds)
else if constexpr (unit == Time::Milliseconds)
return watch.elapsedMilliseconds();
else if constexpr (time == Time::Seconds)
else if constexpr (unit == Time::Seconds)
return watch.elapsedSeconds();
}

View File

@ -306,7 +306,7 @@ void FstBuilder::add(std::string_view current_word, Output current_output)
size_t current_word_len = current_word.size();
if (current_word_len > MAX_TERM_LENGTH)
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Cannot build inverted index: The maximum term length is {}, this is exceeded by term {}", MAX_TERM_LENGTH, current_word_len);
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Cannot build full-text index: The maximum term length is {}, this is exceeded by term {}", MAX_TERM_LENGTH, current_word_len);
size_t prefix_length_plus1 = getCommonPrefixLength(current_word, previous_word) + 1;

View File

@ -138,6 +138,12 @@ inline bool isPrintableASCII(char c)
return uc >= 32 && uc <= 126; /// 127 is ASCII DEL.
}
inline bool isCSIFinalByte(char c)
{
uint8_t uc = c;
return uc >= 0x40 && uc <= 0x7E; /// ASCII @A–Z[\]^_`a–z{|}~
}
inline bool isPunctuationASCII(char c)
{
uint8_t uc = c;

View File

@ -97,13 +97,14 @@ namespace
enum ComputeWidthMode
{
Width, /// Calculate and return visible width
BytesBeforLimit /// Calculate and return the maximum number of bytes when substring fits in visible width.
BytesBeforeLimit /// Calculate and return the maximum number of bytes when substring fits in visible width.
};
template <ComputeWidthMode mode>
size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
{
UTF8Decoder decoder;
int isEscapeSequence = false;
size_t width = 0;
size_t rollback = 0;
for (size_t i = 0; i < size; ++i)
@ -132,21 +133,32 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
}
else
{
i += 16;
width += 16;
if (isEscapeSequence)
{
break;
}
else
{
i += 16;
width += 16;
}
}
}
#endif
while (i < size && isPrintableASCII(data[i]))
{
++width;
if (!isEscapeSequence)
++width;
else if (isCSIFinalByte(data[i]) && data[i - 1] != '\x1b')
isEscapeSequence = false; /// end of CSI escape sequence reached
++i;
}
/// Now i points to position in bytes after regular ASCII sequence
/// and if width > limit, then (width - limit) is the number of extra ASCII characters after width limit.
if (mode == BytesBeforLimit && width > limit)
if (mode == BytesBeforeLimit && width > limit)
return i - (width - limit);
switch (decoder.decode(data[i]))
@ -162,20 +174,18 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
}
case UTF8Decoder::ACCEPT:
{
// there are special control characters that manipulate the terminal output.
// (`0x08`, `0x09`, `0x0a`, `0x0b`, `0x0c`, `0x0d`, `0x1b`)
// Since we don't touch the original column data, there is no easy way to escape them.
// TODO: escape control characters
// TODO: multiline support for '\n'
// special treatment for '\t'
// special treatment for '\t' and for ESC
size_t next_width = width;
if (decoder.codepoint == '\t')
if (decoder.codepoint == '\x1b')
isEscapeSequence = true;
else if (decoder.codepoint == '\t')
next_width += 8 - (prefix + width) % 8;
else
next_width += wcwidth(decoder.codepoint);
if (mode == BytesBeforLimit && next_width > limit)
if (mode == BytesBeforeLimit && next_width > limit)
return i - rollback;
width = next_width;
@ -189,7 +199,7 @@ size_t computeWidthImpl(const UInt8 * data, size_t size, size_t prefix, size_t l
}
// no need to handle trailing sequence as they have zero width
return (mode == BytesBeforLimit) ? size : width;
return (mode == BytesBeforeLimit) ? size : width;
}
}
@ -202,7 +212,7 @@ size_t computeWidth(const UInt8 * data, size_t size, size_t prefix) noexcept
size_t computeBytesBeforeWidth(const UInt8 * data, size_t size, size_t prefix, size_t limit) noexcept
{
return computeWidthImpl<BytesBeforLimit>(data, size, prefix, limit);
return computeWidthImpl<BytesBeforeLimit>(data, size, prefix, limit);
}
}

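The hunks above thread an `isEscapeSequence` flag through the width computation so that bytes inside an ANSI CSI escape sequence (ESC, parameter bytes, then a final byte in the 0x40-0x7E range recognized by `isCSIFinalByte`) add nothing to the visible width; the `data[i - 1] != '\x1b'` guard stops the '[' introducer, which itself falls in the final-byte range, from ending the sequence prematurely. A self-contained, simplified sketch of the same idea for pure ASCII input:

#include <cstdint>
#include <cstdio>
#include <cstring>

static bool isCSIFinalByte(char c) { uint8_t uc = c; return uc >= 0x40 && uc <= 0x7E; }

static size_t visibleWidthASCII(const char * s, size_t n)
{
    size_t width = 0;
    bool in_escape = false;
    for (size_t i = 0; i < n; ++i)
    {
        if (s[i] == '\x1b')
            in_escape = true;                       /// escape sequence starts
        else if (in_escape)
        {
            if (isCSIFinalByte(s[i]) && s[i - 1] != '\x1b')
                in_escape = false;                  /// final byte ends the CSI sequence
        }
        else
            ++width;                                /// ordinary printable byte
    }
    return width;
}

int main()
{
    const char * red = "\x1b[31mred\x1b[0m";        /// "red" wrapped in SGR color codes
    std::printf("%zu\n", visibleWidthASCII(red, std::strlen(red))); /// prints 3, not 12
}

This keeps computed column widths honest when cell values contain terminal color codes, which is what the `BytesBeforeLimit` mode depends on when deciding how many bytes fit a display limit.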
View File

@ -235,7 +235,7 @@ class IColumn;
M(Bool, do_not_merge_across_partitions_select_final, false, "Merge parts only in one partition in select final", 0) \
M(Bool, split_parts_ranges_into_intersecting_and_non_intersecting_final, true, "Split parts ranges into intersecting and non intersecting during FINAL optimization", 0) \
M(Bool, split_intersecting_parts_ranges_into_layers_final, true, "Split intersecting parts ranges into layers during FINAL optimization", 0) \
M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental fulltext (inverted) index.", 0) \
M(Bool, allow_experimental_inverted_index, false, "If it is set to true, allow to use experimental full-text index.", 0) \
\
M(UInt64, mysql_max_rows_to_insert, 65536, "The maximum number of rows in MySQL batch insertion of the MySQL storage engine", 0) \
M(Bool, mysql_map_string_to_text_in_show_columns, true, "If enabled, String type will be mapped to TEXT in SHOW [FULL] COLUMNS, BLOB otherwise. Has an effect only when the connection is made through the MySQL wire protocol.", 0) \
@ -1006,6 +1006,7 @@ class IColumn;
M(Bool, input_format_tsv_empty_as_default, false, "Treat empty fields in TSV input as default values.", 0) \
M(Bool, input_format_tsv_enum_as_number, false, "Treat inserted enum values in TSV formats as enum indices.", 0) \
M(Bool, input_format_null_as_default, true, "Initialize null fields with default values if the data type of this field is not nullable and it is supported by the input format", 0) \
M(Bool, input_format_force_null_for_omitted_fields, false, "Force initialize omitted fields with null values", 0) \
M(Bool, input_format_arrow_case_insensitive_column_matching, false, "Ignore case when matching Arrow columns with CH columns.", 0) \
M(Int64, input_format_orc_row_batch_size, 100'000, "Batch size when reading ORC stripes.", 0) \
M(Bool, input_format_orc_case_insensitive_column_matching, false, "Ignore case when matching ORC columns with CH columns.", 0) \

View File

@ -91,6 +91,7 @@ static std::map<ClickHouseVersion, SettingsChangesHistory::SettingsChanges> sett
{"cross_join_min_rows_to_compress", 0, 10000000, "A new setting."},
{"cross_join_min_bytes_to_compress", 0, 1_GiB, "A new setting."},
{"prefer_external_sort_block_bytes", 0, DEFAULT_BLOCK_SIZE * 256, "Prefer maximum block bytes for external sort, reduce the memory usage during merging."},
{"input_format_force_null_for_omitted_fields", false, false, "Disable type-defaults for omitted fields when needed"},
}},
{"24.4", {{"input_format_json_throw_on_bad_escape_sequence", true, true, "Allow to save JSON strings with bad escape sequences"},
{"max_parsing_threads", 0, 0, "Add a separate setting to control number of threads in parallel parsing from files"},

View File

@ -118,22 +118,7 @@ bool DataTypeMap::equals(const IDataType & rhs) const
bool DataTypeMap::checkKeyType(DataTypePtr key_type)
{
if (key_type->getTypeId() == TypeIndex::LowCardinality)
{
const auto & low_cardinality_data_type = assert_cast<const DataTypeLowCardinality &>(*key_type);
if (!isStringOrFixedString(*(low_cardinality_data_type.getDictionaryType())))
return false;
}
else if (!key_type->isValueRepresentedByInteger()
&& !isStringOrFixedString(*key_type)
&& !WhichDataType(key_type).isNothing()
&& !WhichDataType(key_type).isIPv6()
&& !WhichDataType(key_type).isUUID())
{
return false;
}
return true;
return !isNullableOrLowCardinalityNullable(key_type);
}
DataTypePtr DataTypeMap::getNestedTypeWithUnnamedTuple() const

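The rewritten check replaces the old allow-list of key types (integer-representable types, (Fixed)String, Nothing, IPv6, UUID, and LowCardinality over strings) with a single deny-rule: any key type is accepted unless it is Nullable or LowCardinality(Nullable(...)). A brief sketch of what that admits, assuming the DataTypeFactory and the isNullableOrLowCardinalityNullable helper from DataTypes/:

#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <string>

bool mapKeyAllowed(const std::string & type_name)
{
    const auto key_type = DB::DataTypeFactory::instance().get(type_name);
    return !DB::isNullableOrLowCardinalityNullable(key_type);
}

/// mapKeyAllowed("Float64")                        -> true  (rejected before this change)
/// mapKeyAllowed("Nullable(String)")               -> false (still rejected)
/// mapKeyAllowed("LowCardinality(Nullable(UUID))") -> false (still rejected)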
View File

@ -401,7 +401,7 @@ std::string ExternalQueryBuilder::composeLoadKeysQuery(
{
writeString("SELECT * FROM (", out);
writeString(query, out);
writeString(") WHERE ", out);
writeString(") AS subquery WHERE ", out);
composeKeysCondition(key_columns, requested_rows, method, partition_key_prefix, out);
writeString(";", out);

View File

@ -41,7 +41,7 @@ FileSegmentRangeWriter::FileSegmentRangeWriter(
{
}
bool FileSegmentRangeWriter::write(const char * data, size_t size, size_t offset, FileSegmentKind segment_kind)
bool FileSegmentRangeWriter::write(char * data, size_t size, size_t offset, FileSegmentKind segment_kind)
{
if (finalized)
return false;

View File

@ -39,7 +39,7 @@ public:
* Write a range of file segments. Allocate file segment of `max_file_segment_size` and write to
* it until it is full and then allocate next file segment.
*/
bool write(const char * data, size_t size, size_t offset, FileSegmentKind segment_kind);
bool write(char * data, size_t size, size_t offset, FileSegmentKind segment_kind);
void finalize();

View File

@ -73,9 +73,17 @@ ObjectStoragePtr createObjectStorage(
return std::make_shared<PlainObjectStorage<BaseObjectStorage>>(std::forward<Args>(args)...);
else if (isPlainRewritableStorage(type, config, config_prefix))
{
/// TODO(jkartseva@): Test support for generic disk type
if (type != ObjectStorageType::S3)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "plain_rewritable metadata storage support is implemented only for S3");
/// HDFS object storage currently does not support iteration and does not implement listObjects method.
/// StaticWeb object storage is read-only and works with its dedicated metadata type.
constexpr auto supported_object_storage_types
= std::array{ObjectStorageType::S3, ObjectStorageType::Local, ObjectStorageType::Azure};
if (std::find(supported_object_storage_types.begin(), supported_object_storage_types.end(), type)
== supported_object_storage_types.end())
throw Exception(
ErrorCodes::NOT_IMPLEMENTED,
"plain_rewritable metadata storage support is not implemented for '{}' object storage",
DataSourceDescription{DataSourceType::ObjectStorage, type, MetadataStorageType::PlainRewritable, /*description*/ ""}
.toString());
return std::make_shared<PlainRewritableObjectStorage<BaseObjectStorage>>(std::forward<Args>(args)...);
}

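The replacement turns a hard S3-only guard into a membership test over a constexpr array, so enabling another backend is a one-entry change and unsupported types get a descriptive error. A tiny standalone illustration of the pattern (the enum here is a stand-in for the real ObjectStorageType):

#include <algorithm>
#include <array>
#include <cstdio>

enum class StorageKind { S3, Local, Azure, HDFS, Web };      /// stand-in for ObjectStorageType

static bool supportsPlainRewritable(StorageKind kind)
{
    constexpr auto supported = std::array{StorageKind::S3, StorageKind::Local, StorageKind::Azure};
    return std::find(supported.begin(), supported.end(), kind) != supported.end();
}

int main()
{
    std::printf("S3: %d, HDFS: %d\n",
                supportsPlainRewritable(StorageKind::S3),    /// 1
                supportsPlainRewritable(StorageKind::HDFS)); /// 0
}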
View File

@ -1,16 +1,26 @@
#pragma once
#include <Disks/ObjectStorages/IObjectStorage.h>
#include <Common/ObjectStorageKeyGenerator.h>
#include "CommonPathPrefixKeyGenerator.h"
namespace DB
{
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
}
template <typename BaseObjectStorage>
class PlainRewritableObjectStorage : public BaseObjectStorage
{
public:
template <class... Args>
explicit PlainRewritableObjectStorage(Args &&... args) : BaseObjectStorage(std::forward<Args>(args)...)
explicit PlainRewritableObjectStorage(Args &&... args)
: BaseObjectStorage(std::forward<Args>(args)...)
/// A basic key generator is required for checking S3 capabilities,
/// it will be reset later by metadata storage.
, key_generator(createObjectStorageKeysGeneratorAsIsWithPrefix(BaseObjectStorage::getCommonKeyPrefix()))
{
}
@ -19,6 +29,33 @@ public:
bool isWriteOnce() const override { return false; }
bool isPlain() const override { return true; }
ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override;
ObjectStorageKey generateObjectKeyPrefixForDirectoryPath(const std::string & path) const override;
void setKeysGenerator(ObjectStorageKeysGeneratorPtr gen) override { key_generator = gen; }
private:
ObjectStorageKeysGeneratorPtr key_generator;
};
template <typename BaseObjectStorage>
ObjectStorageKey PlainRewritableObjectStorage<BaseObjectStorage>::generateObjectKeyForPath(const std::string & path) const
{
if (!key_generator)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set");
return key_generator->generate(path, /* is_directory */ false);
}
template <typename BaseObjectStorage>
ObjectStorageKey PlainRewritableObjectStorage<BaseObjectStorage>::generateObjectKeyPrefixForDirectoryPath(const std::string & path) const
{
if (!key_generator)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set");
return key_generator->generate(path, /* is_directory */ true);
}
}

View File

@ -574,13 +574,6 @@ ObjectStorageKey S3ObjectStorage::generateObjectKeyForPath(const std::string & p
return key_generator->generate(path, /* is_directory */ false);
}
ObjectStorageKey S3ObjectStorage::generateObjectKeyPrefixForDirectoryPath(const std::string & path) const
{
if (!key_generator)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Key generator is not set");
return key_generator->generate(path, /* is_directory */ true);
}
}
#endif

View File

@ -159,12 +159,9 @@ public:
bool supportParallelWrite() const override { return true; }
ObjectStorageKey generateObjectKeyForPath(const std::string & path) const override;
ObjectStorageKey generateObjectKeyPrefixForDirectoryPath(const std::string & path) const override;
bool isReadOnly() const override { return s3_settings.get()->read_only; }
void setKeysGenerator(ObjectStorageKeysGeneratorPtr gen) override { key_generator = gen; }
private:
void setNewSettings(std::unique_ptr<S3ObjectStorageSettings> && s3_settings_);

View File

@ -462,15 +462,18 @@ StoragePolicySelectorPtr StoragePolicySelector::updateFromConfig(const Poco::Uti
/// First pass, check.
for (const auto & [name, policy] : policies)
{
if (name.starts_with(TMP_STORAGE_POLICY_PREFIX))
continue;
if (!name.starts_with(TMP_STORAGE_POLICY_PREFIX))
{
if (!result->policies.contains(name))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage policy {} is missing in new configuration", backQuote(name));
if (!result->policies.contains(name))
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Storage policy {} is missing in new configuration", backQuote(name));
policy->checkCompatibleWith(result->policies[name]);
}
policy->checkCompatibleWith(result->policies[name]);
for (const auto & disk : policy->getDisks())
{
disks_before_reload.insert(disk->getName());
}
}
/// Second pass, load.

View File

@ -146,6 +146,7 @@ FormatSettings getFormatSettings(const ContextPtr & context, const Settings & se
format_settings.json.throw_on_bad_escape_sequence = settings.input_format_json_throw_on_bad_escape_sequence;
format_settings.json.ignore_unnecessary_fields = settings.input_format_json_ignore_unnecessary_fields;
format_settings.null_as_default = settings.input_format_null_as_default;
format_settings.force_null_for_omitted_fields = settings.input_format_force_null_for_omitted_fields;
format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros;
format_settings.parquet.row_group_rows = settings.output_format_parquet_row_group_size;
format_settings.parquet.row_group_bytes = settings.output_format_parquet_row_group_size_bytes;

View File

@ -32,6 +32,7 @@ struct FormatSettings
bool write_statistics = true;
bool import_nested_json = false;
bool null_as_default = true;
bool force_null_for_omitted_fields = false;
bool decimal_trailing_zeros = false;
bool defaults_for_omitted_fields = true;
bool is_writing_to_terminal = false;

View File

@ -4853,7 +4853,7 @@ FunctionBasePtr createFunctionBaseCast(
DataTypeUInt8, DataTypeUInt16, DataTypeUInt32, DataTypeUInt64, DataTypeUInt128, DataTypeUInt256,
DataTypeInt8, DataTypeInt16, DataTypeInt32, DataTypeInt64, DataTypeInt128, DataTypeInt256,
DataTypeFloat32, DataTypeFloat64,
DataTypeDate, DataTypeDate32, DataTypeDateTime,
DataTypeDate, DataTypeDate32, DataTypeDateTime, DataTypeDateTime64,
DataTypeString>(return_type.get(), [&](auto & type)
{
monotonicity = FunctionTo<std::decay_t<decltype(type)>>::Type::Monotonic::get;

View File

@ -11,7 +11,7 @@ namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
}
@ -35,7 +35,7 @@ public:
{
if (arguments.empty())
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Function {} needs at least one argument; passed {}.",
getName(),
arguments.size());

View File

@ -10,7 +10,7 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
template <typename Name, typename Impl>
@ -37,7 +37,7 @@ struct MultiSearchFirstPositionImpl
{
// For performance of Volnitsky search, it is crucial to save only one byte for pattern number.
if (needles_arr.size() > std::numeric_limits<UInt8>::max())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at most {}",
name, std::to_string(needles_arr.size()), std::to_string(std::numeric_limits<UInt8>::max()));

View File

@ -10,7 +10,7 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
template <typename Name, typename Impl>
@ -37,7 +37,7 @@ struct MultiSearchImpl
{
// For performance of Volnitsky search, it is crucial to save only one byte for pattern number.
if (needles_arr.size() > std::numeric_limits<UInt8>::max())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at most {}",
name, needles_arr.size(), std::to_string(std::numeric_limits<UInt8>::max()));

View File

@ -182,11 +182,37 @@ struct MapToNestedAdapter : public MapAdapterBase<MapToNestedAdapter<Name, retur
/// Adapter that extracts array with keys or values from Map columns.
template <typename Name, size_t position>
struct MapToSubcolumnAdapter : public MapAdapterBase<MapToSubcolumnAdapter<Name, position>, Name>
struct MapToSubcolumnAdapter
{
static_assert(position <= 1);
using MapAdapterBase<MapToSubcolumnAdapter, Name>::extractNestedTypes;
using MapAdapterBase<MapToSubcolumnAdapter, Name>::extractNestedTypesAndColumns;
static void extractNestedTypes(DataTypes & types)
{
if (types.empty())
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be at least 1",
Name::name,
types.size());
DataTypes new_types = {types[0]};
MapAdapterBase<MapToSubcolumnAdapter, Name>::extractNestedTypes(new_types);
types[0] = new_types[0];
}
static void extractNestedTypesAndColumns(ColumnsWithTypeAndName & arguments)
{
if (arguments.empty())
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
"Number of arguments for function {} doesn't match: passed {}, should be at least 1",
Name::name,
arguments.size());
ColumnsWithTypeAndName new_arguments = {arguments[0]};
MapAdapterBase<MapToSubcolumnAdapter, Name>::extractNestedTypesAndColumns(new_arguments);
arguments[0] = new_arguments[0];
}
static DataTypePtr extractNestedType(const DataTypeMap & type_map)
{

View File

@ -18,7 +18,7 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
@ -43,7 +43,7 @@ public:
DataTypePtr getReturnTypeImpl(const DataTypes & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at least 1.",
getName(), arguments.size());

View File

@ -59,7 +59,8 @@ namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int BAD_ARGUMENTS;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
}
@ -101,7 +102,7 @@ public:
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at least 1.",
getName(), arguments.size());
@ -238,7 +239,7 @@ ColumnPtr FunctionArrayEnumerateRankedExtended<Derived>::executeImpl(
}
if (offsets_by_depth.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "No arrays passed to function {}", getName());
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No arrays passed to function {}", getName());
auto res_nested = ColumnUInt32::create();

View File

@ -14,7 +14,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
extern const int TYPE_MISMATCH;
}
@ -41,7 +41,7 @@ public:
void getLambdaArgumentTypes(DataTypes & arguments) const override
{
if (arguments.size() < 3)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires as arguments a lambda function, at least one array and an accumulator", getName());
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} requires as arguments a lambda function, at least one array and an accumulator", getName());
DataTypes accumulator_and_array_types(arguments.size() - 1);
accumulator_and_array_types[0] = arguments.back();
@ -64,7 +64,7 @@ public:
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.size() < 3)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires as arguments a lambda function, at least one array and an accumulator", getName());
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} requires as arguments a lambda function, at least one array and an accumulator", getName());
const auto * lambda_function_type = checkAndGetDataType<DataTypeFunction>(arguments[0].type.get());
if (!lambda_function_type)

View File

@ -21,7 +21,7 @@ namespace DB
namespace ErrorCodes
{
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_COLUMN;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
@ -73,7 +73,7 @@ DataTypePtr FunctionArrayReduce::getReturnTypeImpl(const ColumnsWithTypeAndName
/// (possibly with parameters in parentheses, for example: "quantile(0.99)").
if (arguments.size() < 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at least 2.",
getName(), arguments.size());

View File

@ -14,7 +14,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_COLUMN;
}
@ -39,7 +39,7 @@ public:
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Function {} needs at least one argument; passed {}." , getName(), arguments.size());
DataTypes arguments_types;

View File

@ -16,7 +16,7 @@ namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
}
using namespace GatherUtils;
@ -48,7 +48,7 @@ public:
{
if (arguments.size() < 2)
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at least 2",
getName(),
arguments.size());
@ -225,7 +225,7 @@ public:
{
if (arguments.empty())
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at least 1.",
getName(),
arguments.size());

View File

@ -17,7 +17,7 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
class FunctionGenerateULID : public IFunction
@ -45,7 +45,7 @@ public:
{
if (arguments.size() > 1)
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be 0 or 1.",
getName(), arguments.size());

View File

@ -47,6 +47,10 @@ public:
bool isSuitableForShortCircuitArgumentsExecution(const DataTypesWithConstInfo & /*arguments*/) const override { return false; }
/// Change it to never return LowCardinality, making it consistent when using groupingForRollup / groupingForCube
/// with __grouping_set
bool canBeExecutedOnLowCardinalityDictionary() const override { return false; }
DataTypePtr getReturnTypeImpl(const DataTypes & /*arguments*/) const override
{
return std::make_shared<DataTypeUInt64>();

View File

@ -25,7 +25,7 @@ namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_COLUMN;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -53,7 +53,7 @@ namespace
DataTypePtr getReturnTypeImpl(const ColumnsWithTypeAndName & arguments) const override
{
if (arguments.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least one argument.", getName());
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} requires at least one argument.", getName());
for (const auto & arg : arguments)
if (!isString(arg.type))

View File

@ -18,9 +18,10 @@ namespace DB
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int SIZES_OF_ARRAYS_DONT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
}
namespace
@ -64,19 +65,19 @@ public:
{
size_t arguments_size = arguments.size();
if (arguments_size < 2)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be at least 2",
getName(),
arguments_size);
Names nested_names = extractNestedNames(arguments[0].column);
if (nested_names.empty())
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"First argument for function {} must be constant column with array of strings",
getName());
if (nested_names.size() != arguments_size - 1)
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Size of nested names array for function {} does not match arrays arguments size. Actual {}. Expected {}",
getName(),
nested_names.size(),

View File

@ -18,7 +18,7 @@ namespace ErrorCodes
{
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int CANNOT_CLOCK_GETTIME;
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
}
namespace
@ -128,7 +128,7 @@ public:
if (arguments.size() > 2)
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Arguments size of function {} should be 0, or 1, or 2", getName());
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Arguments size of function {} should be 0, or 1, or 2", getName());
}
if (!arguments.empty())
{

View File

@ -12,7 +12,7 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
}
@ -63,7 +63,7 @@ public:
{
if (arguments.size() > 1)
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Arguments size of function {} should be 0 or 1", getName());
throw Exception(ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION, "Arguments size of function {} should be 0 or 1", getName());
}
if (arguments.size() == 1 && !isStringOrFixedString(arguments[0].type))
{

View File

@ -11,7 +11,8 @@ namespace DB
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int TOO_MANY_ARGUMENTS_FOR_FUNCTION;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int BAD_ARGUMENTS;
}
@ -117,14 +118,14 @@ namespace
{
if (arguments.empty())
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be 1.",
getName(),
arguments.size());
if (arguments.size() > 1)
throw Exception(
ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
ErrorCodes::TOO_MANY_ARGUMENTS_FOR_FUNCTION,
"Number of arguments for function {} doesn't match: passed {}, should be 1.",
getName(),
arguments.size());

View File

@ -37,7 +37,7 @@ namespace DB
{
namespace ErrorCodes
{
extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
extern const int TOO_FEW_ARGUMENTS_FOR_FUNCTION;
extern const int BAD_ARGUMENTS;
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
extern const int ILLEGAL_COLUMN;
@ -87,7 +87,7 @@ public:
{
if (arguments.size() < 2)
{
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} requires at least 2 arguments", getName());
throw Exception(ErrorCodes::TOO_FEW_ARGUMENTS_FOR_FUNCTION, "Function {} requires at least 2 arguments", getName());
}
/** We allow function invocation in one of the following forms:

View File

@ -340,7 +340,7 @@ void FileSegment::setRemoteFileReader(RemoteFileReaderPtr remote_file_reader_)
remote_file_reader = remote_file_reader_;
}
void FileSegment::write(const char * from, size_t size, size_t offset)
void FileSegment::write(char * from, size_t size, size_t offset)
{
ProfileEventTimeIncrement<Microseconds> watch(ProfileEvents::FileSegmentWriteMicroseconds);
@ -389,16 +389,20 @@ void FileSegment::write(const char * from, size_t size, size_t offset)
try
{
if (!cache_writer)
cache_writer = std::make_unique<WriteBufferFromFile>(file_segment_path);
#ifdef ABORT_ON_LOGICAL_ERROR
/// This mutex is only needed to have a valid assertion in assertCacheCorrectness(),
/// which is only executed in debug/sanitizer builds (under ABORT_ON_LOGICAL_ERROR).
std::lock_guard lock(write_mutex);
#endif
cache_writer->write(from, size);
if (!cache_writer)
cache_writer = std::make_unique<WriteBufferFromFile>(file_segment_path, /* buf_size */0);
/// Size is equal to offset as offset for write buffer points to data end.
cache_writer->set(from, size, /* offset */size);
/// Reset the buffer when finished.
SCOPE_EXIT({ cache_writer->set(nullptr, 0); });
/// Flush the buffer.
cache_writer->next();
downloaded_size += size;

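The rewritten body avoids copying `from` into the writer's own memory: the WriteBufferFromFile is created with a zero-size internal buffer, pointed directly at the caller's data with offset == size (so the buffer already looks full), flushed with next(), and detached again; this zero-copy trick is also why `from` lost its const qualifier in the signatures above. A condensed sketch of the same pattern, assuming DB::WriteBufferFromFile and the SCOPE_EXIT macro from base/scope_guard.h:

#include <IO/WriteBufferFromFile.h>
#include <base/scope_guard.h>
#include <string>

void flushToCacheFile(const std::string & path, char * from, size_t size)
{
    DB::WriteBufferFromFile out(path, /* buf_size */ 0);  /// no internal buffer is allocated
    out.set(from, size, /* offset */ size);               /// offset == size: data counts as already written
    SCOPE_EXIT({ out.set(nullptr, 0); });                 /// detach before `from` can go away
    out.next();                                           /// flush [from, from + size) to disk
}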
View File

@ -204,7 +204,7 @@ public:
bool reserve(size_t size_to_reserve, size_t lock_wait_timeout_milliseconds, FileCacheReserveStat * reserve_stat = nullptr);
/// Write data into reserved space.
void write(const char * from, size_t size, size_t offset);
void write(char * from, size_t size, size_t offset);
// Invariant: if state() != DOWNLOADING and remote file reader is present, the reader's
// available() == 0, and getFileOffsetOfBufferEnd() == our getCurrentWriteOffset().

View File

@ -2498,7 +2498,7 @@ AsyncLoader & Context::getAsyncLoader() const
shared->async_loader = std::make_unique<AsyncLoader>(std::vector<AsyncLoader::PoolInitializer>{
// IMPORTANT: Pool declaration order should match the order in `PoolId.h` to get the indices right.
{ // TablesLoaderForegroundPoolId
"FgLoad",
"ForegroundLoad",
CurrentMetrics::TablesLoaderForegroundThreads,
CurrentMetrics::TablesLoaderForegroundThreadsActive,
CurrentMetrics::TablesLoaderForegroundThreadsScheduled,
@ -2506,7 +2506,7 @@ AsyncLoader & Context::getAsyncLoader() const
TablesLoaderForegroundPriority
},
{ // TablesLoaderBackgroundLoadPoolId
"BgLoad",
"BackgroundLoad",
CurrentMetrics::TablesLoaderBackgroundThreads,
CurrentMetrics::TablesLoaderBackgroundThreadsActive,
CurrentMetrics::TablesLoaderBackgroundThreadsScheduled,
@ -2514,7 +2514,7 @@ AsyncLoader & Context::getAsyncLoader() const
TablesLoaderBackgroundLoadPriority
},
{ // TablesLoaderBackgroundStartupPoolId
"BgStartup",
"BackgrndStartup",
CurrentMetrics::TablesLoaderBackgroundThreads,
CurrentMetrics::TablesLoaderBackgroundThreadsActive,
CurrentMetrics::TablesLoaderBackgroundThreadsScheduled,

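The longer, more readable pool names still have to fit a hard limit: on Linux a thread name is capped at 15 characters plus the terminating NUL, which is presumably why the third pool is spelled "BackgrndStartup" (exactly 15 characters) rather than "BackgroundStartup" (17), while "ForegroundLoad" and "BackgroundLoad" fit at 14. A standalone probe of that limit, assuming glibc and compiling with -pthread:

#include <pthread.h>
#include <cerrno>
#include <cstdio>

int main()
{
    int rc_long = pthread_setname_np(pthread_self(), "BackgroundStartup"); /// 17 chars: too long
    int rc_fit  = pthread_setname_np(pthread_self(), "BackgrndStartup");   /// 15 chars: fits
    std::printf("17-char name: %s\n", rc_long == ERANGE ? "rejected (ERANGE)" : "accepted");
    std::printf("15-char name: %s\n", rc_fit == 0 ? "accepted" : "rejected");
}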
View File

@ -29,7 +29,7 @@ GinFilterParameters::GinFilterParameters(size_t ngrams_, UInt64 max_rows_per_pos
max_rows_per_postings_list = std::numeric_limits<UInt64>::max();
if (ngrams > 8)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The size of inverted index filter cannot be greater than 8");
throw Exception(ErrorCodes::BAD_ARGUMENTS, "The size of full-text index filter cannot be greater than 8");
}
GinFilter::GinFilter(const GinFilterParameters & params_)

View File

@ -2,12 +2,11 @@
#include <Storages/MergeTree/GinIndexStore.h>
#include <vector>
#include <memory>
namespace DB
{
static inline constexpr auto INVERTED_INDEX_NAME = "inverted";
static inline constexpr auto FULL_TEXT_INDEX_NAME = "full_text";
static inline constexpr UInt64 UNLIMITED_ROWS_PER_POSTINGS_LIST = 0;
static inline constexpr UInt64 MIN_ROWS_PER_POSTINGS_LIST = 8 * 1024;
static inline constexpr UInt64 DEFAULT_MAX_ROWS_PER_POSTINGS_LIST = 64 * 1024;
@ -34,7 +33,7 @@ struct GinSegmentWithRowIdRange
using GinSegmentWithRowIdRangeVector = std::vector<GinSegmentWithRowIdRange>;
/// GinFilter provides underlying functionalities for building inverted index and also
/// GinFilter provides underlying functionalities for building full-text index and also
/// it does filtering the unmatched rows according to its query string.
/// It also builds and uses skipping index which stores (segmentID, RowIDStart, RowIDEnd) triples.
class GinFilter
@ -44,7 +43,7 @@ public:
explicit GinFilter(const GinFilterParameters & params_);
/// Add term (located at 'data' with length 'len') and its row ID to the postings list builder
/// for building inverted index for the given store.
/// for building full-text index for the given store.
void add(const char * data, size_t len, UInt32 rowID, GinIndexStorePtr & store) const;
/// Accumulate (segmentID, RowIDStart, RowIDEnd) for building skipping index

View File

@ -748,15 +748,12 @@ InterpreterCreateQuery::TableProperties InterpreterCreateQuery::getTableProperti
IndexDescription index_desc = IndexDescription::getIndexFromAST(index->clone(), properties.columns, getContext());
if (properties.indices.has(index_desc.name))
throw Exception(ErrorCodes::ILLEGAL_INDEX, "Duplicated index name {} is not allowed. Please use different index names.", backQuoteIfNeed(index_desc.name));
const auto & settings = getContext()->getSettingsRef();
if (index_desc.type == INVERTED_INDEX_NAME && !settings.allow_experimental_inverted_index)
{
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED,
"Experimental Inverted Index feature is not enabled (the setting 'allow_experimental_inverted_index')");
}
if (index_desc.type == FULL_TEXT_INDEX_NAME && !settings.allow_experimental_inverted_index)
throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Experimental full-text index feature is not enabled (the setting 'allow_experimental_inverted_index')");
if (index_desc.type == "annoy" && !settings.allow_experimental_annoy_index)
throw Exception(ErrorCodes::INCORRECT_QUERY, "Annoy index is disabled. Turn on allow_experimental_annoy_index");
if (index_desc.type == "usearch" && !settings.allow_experimental_usearch_index)
throw Exception(ErrorCodes::INCORRECT_QUERY, "USearch index is disabled. Turn on allow_experimental_usearch_index");

Some files were not shown because too many files have changed in this diff