Mirror of https://github.com/ClickHouse/ClickHouse.git

Commit 6f53784f22: Merge remote-tracking branch 'upstream/master' into better-tests-for-data-lakes
.github/workflows/backport_branches.yml (vendored, +7)

@@ -349,6 +349,13 @@ jobs:
         with:
           clear-repository: true
           submodules: true
+      - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+        run: |
+          rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+          git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+          "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+          du -hs "$GITHUB_WORKSPACE/contrib" ||:
+          find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
       - name: Build
         run: |
           sudo rm -fr "$TEMP_PATH"
.github/workflows/master.yml (vendored, +7)

@@ -487,6 +487,13 @@ jobs:
         with:
           clear-repository: true
           submodules: true
+      - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+        run: |
+          rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+          git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+          "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+          du -hs "$GITHUB_WORKSPACE/contrib" ||:
+          find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
       - name: Build
         run: |
           sudo rm -fr "$TEMP_PATH"
.github/workflows/pull_request.yml (vendored, +7)

@@ -550,6 +550,13 @@ jobs:
         with:
           clear-repository: true
           submodules: true
+      - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+        run: |
+          rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+          git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+          "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+          du -hs "$GITHUB_WORKSPACE/contrib" ||:
+          find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
       - name: Build
         run: |
           sudo rm -fr "$TEMP_PATH"
.github/workflows/release_branches.yml (vendored, +7)

@@ -406,6 +406,13 @@ jobs:
         with:
           clear-repository: true
           submodules: true
+      - name: Apply sparse checkout for contrib # in order to check that it doesn't break build
+        run: |
+          rm -rf "$GITHUB_WORKSPACE/contrib" && echo 'removed'
+          git -C "$GITHUB_WORKSPACE" checkout . && echo 'restored'
+          "$GITHUB_WORKSPACE/contrib/update-submodules.sh" && echo 'OK'
+          du -hs "$GITHUB_WORKSPACE/contrib" ||:
+          find "$GITHUB_WORKSPACE/contrib" -type f | wc -l ||:
       - name: Build
         run: |
           sudo rm -fr "$TEMP_PATH"
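All four workflows gain the identical verification step. A sketch of reproducing the check locally from the repository root (the same commands, with $GITHUB_WORKSPACE replaced by the current checkout):

    rm -rf contrib && echo 'removed'                # drop every vendored submodule tree
    git checkout . && echo 'restored'               # restore the tracked files under contrib/
    ./contrib/update-submodules.sh && echo 'OK'     # re-fetch submodules through the sparse-checkout hooks
    du -hs contrib ||:                              # report the resulting size ('||:' swallows any failure)
    find contrib -type f | wc -l ||:                # report the file count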
CMakeLists.txt

@@ -58,7 +58,7 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
     set (RLIMIT_CPU 1000)
 
     # gcc10/gcc10/clang -fsanitize=memory is too heavy
-    if (SANITIZE STREQUAL "memory" OR COMPILER_GCC)
+    if (SANITIZE STREQUAL "memory")
         set (RLIMIT_DATA 10000000000) # 10G
     endif()
 
@@ -286,48 +286,31 @@ set (CMAKE_C_STANDARD 11)
 set (CMAKE_C_EXTENSIONS ON) # required by most contribs written in C
 set (CMAKE_C_STANDARD_REQUIRED ON)
 
-if (COMPILER_GCC OR COMPILER_CLANG)
-    # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
-    # See https://reviews.llvm.org/D112921
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
-endif ()
-
-# falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
-# benchmarks.
-if (COMPILER_GCC OR COMPILER_CLANG)
-    set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
-endif ()
-
-if (ARCH_AMD64)
-    # align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
-    # which makes benchmark results more stable.
-    set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
-    if (COMPILER_GCC)
-        # gcc is in assembler, need to add "-Wa," prefix
-        set(BRANCHES_WITHIN_32B_BOUNDARIES "-Wa,${BRANCHES_WITHIN_32B_BOUNDARIES}")
-    endif()
-
-    set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
-endif()
-
-if (COMPILER_GCC)
-    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcoroutines")
-endif ()
-
 # Compiler-specific coverage flags e.g. -fcoverage-mapping for gcc
 option(WITH_COVERAGE "Profile the resulting binary/binaries" OFF)
 
-if (WITH_COVERAGE AND COMPILER_CLANG)
-    set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
-    # If we want to disable coverage for specific translation units
-    set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
-endif()
-
-if (WITH_COVERAGE AND COMPILER_GCC)
-    set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-arcs -ftest-coverage")
-    set(COVERAGE_OPTION "-lgcov")
-    set(WITHOUT_COVERAGE "-fno-profile-arcs -fno-test-coverage")
-endif()
+if (COMPILER_CLANG)
+    # Enable C++14 sized global deallocation functions. It should be enabled by setting -std=c++14 but I'm not sure.
+    # See https://reviews.llvm.org/D112921
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsized-deallocation")
+
+    # falign-functions=32 prevents from random performance regressions with the code change. Thus, providing more stable
+    # benchmarks.
+    set(COMPILER_FLAGS "${COMPILER_FLAGS} -falign-functions=32")
+
+    if (ARCH_AMD64)
+        # align branches within a 32-Byte boundary to avoid the potential performance loss when code layout change,
+        # which makes benchmark results more stable.
+        set(BRANCHES_WITHIN_32B_BOUNDARIES "-mbranches-within-32B-boundaries")
+        set(COMPILER_FLAGS "${COMPILER_FLAGS} ${BRANCHES_WITHIN_32B_BOUNDARIES}")
+    endif()
+
+    if (WITH_COVERAGE)
+        set(COMPILER_FLAGS "${COMPILER_FLAGS} -fprofile-instr-generate -fcoverage-mapping")
+        # If we want to disable coverage for specific translation units
+        set(WITHOUT_COVERAGE "-fno-profile-instr-generate -fno-coverage-mapping")
+    endif()
+endif ()
 
 set (COMPILER_FLAGS "${COMPILER_FLAGS}")
 
README.md

@@ -1,4 +1,4 @@
-[![ClickHouse — open source distributed column-oriented DBMS](https://github.com/ClickHouse/clickhouse-presentations/raw/master/images/logo-400x240.png)](https://clickhouse.com)
+[<img alt="ClickHouse — open source distributed column-oriented DBMS" width="400px" src="https://clickhouse.com/images/ch_gh_logo_rounded.png" />](https://clickhouse.com?utm_source=github)
 
 ClickHouse® is an open-source column-oriented database management system that allows generating analytical data reports in real-time.
 
base/base/wide_integer.h

@@ -155,13 +155,13 @@ struct common_type<wide::integer<Bits, Signed>, Arithmetic>
         std::is_floating_point_v<Arithmetic>,
         Arithmetic,
         std::conditional_t<
-            sizeof(Arithmetic) < Bits * sizeof(long),
+            sizeof(Arithmetic) * 8 < Bits,
             wide::integer<Bits, Signed>,
             std::conditional_t<
-                Bits * sizeof(long) < sizeof(Arithmetic),
+                Bits < sizeof(Arithmetic) * 8,
                 Arithmetic,
                 std::conditional_t<
-                    Bits * sizeof(long) == sizeof(Arithmetic) && (std::is_same_v<Signed, signed> || std::is_signed_v<Arithmetic>),
+                    Bits == sizeof(Arithmetic) * 8 && (std::is_same_v<Signed, signed> || std::is_signed_v<Arithmetic>),
                     Arithmetic,
                     wide::integer<Bits, Signed>>>>>;
 };
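The old conditions measured the wide integer's width in units of sizeof(long), which is 8 bytes on LP64 Linux but 4 bytes on 32-bit and LLP64 targets, so the byte-count comparison could pick a different common type per platform. The new conditions compare bit widths directly (sizeof(Arithmetic) * 8 against Bits), making the chosen type the same everywhere.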
cmake/darwin/default_libs.cmake

@@ -2,13 +2,6 @@ set (DEFAULT_LIBS "-nodefaultlibs")
 
 set (DEFAULT_LIBS "${DEFAULT_LIBS} ${COVERAGE_OPTION} -lc -lm -lpthread -ldl")
 
-if (COMPILER_GCC)
-    set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc_eh")
-    if (ARCH_AARCH64)
-        set (DEFAULT_LIBS "${DEFAULT_LIBS} -lgcc")
-    endif ()
-endif ()
-
 message(STATUS "Default libraries: ${DEFAULT_LIBS}")
 
 set(CMAKE_CXX_STANDARD_LIBRARIES ${DEFAULT_LIBS})
cmake/freebsd/default_libs.cmake

@@ -1,9 +1,5 @@
 set (DEFAULT_LIBS "-nodefaultlibs")
 
-if (NOT COMPILER_CLANG)
-    message (FATAL_ERROR "FreeBSD build is supported only for Clang")
-endif ()
-
 if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "amd64")
     execute_process (COMMAND ${CMAKE_CXX_COMPILER} --print-file-name=libclang_rt.builtins-x86_64.a OUTPUT_VARIABLE BUILTINS_LIBRARY OUTPUT_STRIP_TRAILING_WHITESPACE)
 else ()
cmake/linux/default_libs.cmake

@@ -11,8 +11,6 @@ if (COMPILER_CLANG)
     if (NOT EXISTS "${BUILTINS_LIBRARY}")
         set (BUILTINS_LIBRARY "-lgcc")
     endif ()
-else ()
-    set (BUILTINS_LIBRARY "-lgcc")
 endif ()
 
 if (OS_ANDROID)
cmake/sanitize.cmake

@@ -8,12 +8,6 @@ option (SANITIZE "Enable one of the code sanitizers" "")
 
 set (SAN_FLAGS "${SAN_FLAGS} -g -fno-omit-frame-pointer -DSANITIZER")
 
-# gcc with -nodefaultlibs does not add sanitizer libraries
-# with -static-libasan and similar
-macro(add_explicit_sanitizer_library lib)
-    target_link_libraries(global-libs INTERFACE "-Wl,-static -l${lib} -Wl,-Bdynamic")
-endmacro()
-
 if (SANITIZE)
     if (SANITIZE STREQUAL "address")
         # LLVM-15 has a bug in Address Sanitizer, preventing the usage of 'sanitize-address-use-after-scope',

@@ -28,9 +22,6 @@ if (SANITIZE)
         if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
             set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan")
         endif ()
-        if (COMPILER_GCC)
-            add_explicit_sanitizer_library(asan)
-        endif()
 
     elseif (SANITIZE STREQUAL "memory")
         # MemorySanitizer flags are set according to the official documentation:

@@ -58,11 +49,6 @@ if (SANITIZE)
         set (TSAN_FLAGS "-fsanitize=thread")
         if (COMPILER_CLANG)
             set (TSAN_FLAGS "${TSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt")
-        else()
-            set (MESSAGE "TSAN suppressions was not passed to the compiler (since the compiler is not clang)\n")
-            set (MESSAGE "${MESSAGE}Use the following command to pass them manually:\n")
-            set (MESSAGE "${MESSAGE} export TSAN_OPTIONS=\"$TSAN_OPTIONS suppressions=${CMAKE_SOURCE_DIR}/tests/tsan_suppressions.txt\"")
-            message (WARNING "${MESSAGE}")
         endif()
 
 

@@ -74,9 +60,6 @@ if (SANITIZE)
         if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
             set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan")
         endif ()
-        if (COMPILER_GCC)
-            add_explicit_sanitizer_library(tsan)
-        endif()
 
     elseif (SANITIZE STREQUAL "undefined")
         set (UBSAN_FLAGS "-fsanitize=undefined -fno-sanitize-recover=all -fno-sanitize=float-divide-by-zero")

@@ -91,11 +74,6 @@ if (SANITIZE)
         endif()
         if (COMPILER_CLANG)
             set (UBSAN_FLAGS "${UBSAN_FLAGS} -fsanitize-blacklist=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt")
-        else()
-            set (MESSAGE "UBSAN suppressions was not passed to the compiler (since the compiler is not clang)\n")
-            set (MESSAGE "${MESSAGE}Use the following command to pass them manually:\n")
-            set (MESSAGE "${MESSAGE} export UBSAN_OPTIONS=\"$UBSAN_OPTIONS suppressions=${CMAKE_SOURCE_DIR}/tests/ubsan_suppressions.txt\"")
-            message (WARNING "${MESSAGE}")
         endif()
 
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SAN_FLAGS} ${UBSAN_FLAGS}")

@@ -106,9 +84,6 @@ if (SANITIZE)
         if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
             set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
         endif ()
-        if (COMPILER_GCC)
-            add_explicit_sanitizer_library(ubsan)
-        endif()
 
         # llvm-tblgen, that is used during LLVM build, doesn't work with UBSan.
         set (ENABLE_EMBEDDED_COMPILER 0 CACHE BOOL "")
cmake/tools.cmake

@@ -1,8 +1,6 @@
 # Compiler
 
-if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
-    set (COMPILER_GCC 1)
-elseif (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
+if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
     set (COMPILER_CLANG 1) # Safe to treat AppleClang as a regular Clang, in general.
 elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
     set (COMPILER_CLANG 1)

@@ -18,16 +16,8 @@ message (STATUS "Using compiler:\n${COMPILER_SELF_IDENTIFICATION}")
 set (CLANG_MINIMUM_VERSION 15)
 set (XCODE_MINIMUM_VERSION 12.0)
 set (APPLE_CLANG_MINIMUM_VERSION 12.0.0)
-set (GCC_MINIMUM_VERSION 11)
 
-if (COMPILER_GCC)
-    message (FATAL_ERROR "Compilation with GCC is unsupported. Please use Clang instead.")
-
-    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${GCC_MINIMUM_VERSION})
-        message (FATAL_ERROR "Compilation with GCC version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, the minimum required version is ${GCC_MINIMUM_VERSION}.")
-    endif ()
-
-elseif (COMPILER_CLANG)
+if (COMPILER_CLANG)
     if (CMAKE_CXX_COMPILER_ID MATCHES "AppleClang")
         # (Experimental!) Specify "-DALLOW_APPLECLANG=ON" when running CMake configuration step, if you want to experiment with using it.
         if (NOT ALLOW_APPLECLANG AND NOT DEFINED ENV{ALLOW_APPLECLANG})

@@ -58,9 +48,7 @@ if (LINKER_NAME MATCHES "gold")
 endif ()
 
 if (NOT LINKER_NAME)
-    if (COMPILER_GCC)
-        find_program (LLD_PATH NAMES "ld.lld")
-    elseif (COMPILER_CLANG)
+    if (COMPILER_CLANG)
         if (OS_LINUX)
             if (NOT ARCH_S390X) # s390x doesnt support lld
                 find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")

@@ -69,10 +57,7 @@ if (NOT LINKER_NAME)
     endif ()
     if (OS_LINUX)
         if (LLD_PATH)
-            if (COMPILER_GCC)
-                # GCC driver requires one of supported linker names like "lld".
-                set (LINKER_NAME "lld")
-            else ()
+            if (COMPILER_CLANG)
                 # Clang driver simply allows full linker path.
                 set (LINKER_NAME ${LLD_PATH})
             endif ()

@@ -91,8 +76,6 @@ if (LINKER_NAME)
         configure_file ("${CMAKE_CURRENT_SOURCE_DIR}/cmake/ld.lld.in" "${LLD_WRAPPER}" @ONLY)
 
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --ld-path=${LLD_WRAPPER}")
-    else ()
-        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
     endif ()
 
 endif ()

@@ -105,9 +88,7 @@ endif()
 
 # Archiver
 
-if (COMPILER_GCC)
-    find_program (LLVM_AR_PATH NAMES "llvm-ar" "llvm-ar-15" "llvm-ar-14" "llvm-ar-13" "llvm-ar-12")
-else ()
+if (COMPILER_CLANG)
     find_program (LLVM_AR_PATH NAMES "llvm-ar-${COMPILER_VERSION_MAJOR}" "llvm-ar")
 endif ()
 

@@ -119,9 +100,7 @@ message(STATUS "Using archiver: ${CMAKE_AR}")
 
 # Ranlib
 
-if (COMPILER_GCC)
-    find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib" "llvm-ranlib-15" "llvm-ranlib-14" "llvm-ranlib-13" "llvm-ranlib-12")
-else ()
+if (COMPILER_CLANG)
     find_program (LLVM_RANLIB_PATH NAMES "llvm-ranlib-${COMPILER_VERSION_MAJOR}" "llvm-ranlib")
 endif ()
 

@@ -133,9 +112,7 @@ message(STATUS "Using ranlib: ${CMAKE_RANLIB}")
 
 # Install Name Tool
 
-if (COMPILER_GCC)
-    find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool" "llvm-install-name-tool-15" "llvm-install-name-tool-14" "llvm-install-name-tool-13" "llvm-install-name-tool-12")
-else ()
+if (COMPILER_CLANG)
     find_program (LLVM_INSTALL_NAME_TOOL_PATH NAMES "llvm-install-name-tool-${COMPILER_VERSION_MAJOR}" "llvm-install-name-tool")
 endif ()
 

@@ -147,9 +124,7 @@ message(STATUS "Using install-name-tool: ${CMAKE_INSTALL_NAME_TOOL}")
 
 # Objcopy
 
-if (COMPILER_GCC)
-    find_program (OBJCOPY_PATH NAMES "llvm-objcopy" "llvm-objcopy-15" "llvm-objcopy-14" "llvm-objcopy-13" "llvm-objcopy-12" "objcopy")
-else ()
+if (COMPILER_CLANG)
     find_program (OBJCOPY_PATH NAMES "llvm-objcopy-${COMPILER_VERSION_MAJOR}" "llvm-objcopy" "objcopy")
 endif ()
 

@@ -161,9 +136,7 @@ endif ()
 
 # Strip
 
-if (COMPILER_GCC)
-    find_program (STRIP_PATH NAMES "llvm-strip" "llvm-strip-15" "llvm-strip-14" "llvm-strip-13" "llvm-strip-12" "strip")
-else ()
+if (COMPILER_CLANG)
     find_program (STRIP_PATH NAMES "llvm-strip-${COMPILER_VERSION_MAJOR}" "llvm-strip" "strip")
 endif ()
 
cmake/warnings.cmake

@@ -47,115 +47,4 @@ if (COMPILER_CLANG)
     no_warning(enum-constexpr-conversion) # breaks magic-enum library in clang-16
     no_warning(unsafe-buffer-usage) # too aggressive
     # TODO Enable conversion, sign-conversion, double-promotion warnings.
-elseif (COMPILER_GCC)
-    # Add compiler options only to c++ compiler
-    function(add_cxx_compile_options option)
-        add_compile_options("$<$<STREQUAL:$<TARGET_PROPERTY:LINKER_LANGUAGE>,CXX>:${option}>")
-    endfunction()
-    # Warn about boolean expression compared with an integer value different from true/false
-    add_cxx_compile_options(-Wbool-compare)
-    # Warn whenever a pointer is cast such that the required alignment of the target is increased.
-    add_cxx_compile_options(-Wcast-align)
-    # Warn whenever a pointer is cast so as to remove a type qualifier from the target type.
-    add_cxx_compile_options(-Wcast-qual)
-    # Warn when deleting a pointer to incomplete type, which may cause undefined behavior at runtime
-    add_cxx_compile_options(-Wdelete-incomplete)
-    # Warn if a requested optimization pass is disabled. Code is too big or too complex
-    add_cxx_compile_options(-Wdisabled-optimization)
-    # Warn about duplicated conditions in an if-else-if chain
-    add_cxx_compile_options(-Wduplicated-cond)
-    # Warn about a comparison between values of different enumerated types
-    add_cxx_compile_options(-Wenum-compare)
-    # Warn about uninitialized variables that are initialized with themselves
-    add_cxx_compile_options(-Winit-self)
-    # Warn about logical not used on the left hand side operand of a comparison
-    add_cxx_compile_options(-Wlogical-not-parentheses)
-    # Warn about suspicious uses of logical operators in expressions
-    add_cxx_compile_options(-Wlogical-op)
-    # Warn if there exists a path from the function entry to a use of the variable that is uninitialized.
-    add_cxx_compile_options(-Wmaybe-uninitialized)
-    # Warn when the indentation of the code does not reflect the block structure
-    add_cxx_compile_options(-Wmisleading-indentation)
-    # Warn if a global function is defined without a previous declaration - disabled because of build times
-    # add_cxx_compile_options(-Wmissing-declarations)
-    # Warn if a user-supplied include directory does not exist
-    add_cxx_compile_options(-Wmissing-include-dirs)
-    # Obvious
-    add_cxx_compile_options(-Wnon-virtual-dtor)
-    # Obvious
-    add_cxx_compile_options(-Wno-return-local-addr)
-    # This warning is disabled due to false positives if compiled with libc++: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90037
-    #add_cxx_compile_options(-Wnull-dereference)
-    # Obvious
-    add_cxx_compile_options(-Wodr)
-    # Obvious
-    add_cxx_compile_options(-Wold-style-cast)
-    # Warn when a function declaration hides virtual functions from a base class
-    # add_cxx_compile_options(-Woverloaded-virtual)
-    # Warn about placement new expressions with undefined behavior
-    add_cxx_compile_options(-Wplacement-new=2)
-    # Warn about anything that depends on the “size of” a function type or of void
-    add_cxx_compile_options(-Wpointer-arith)
-    # Warn if anything is declared more than once in the same scope
-    add_cxx_compile_options(-Wredundant-decls)
-    # Member initialization reordering
-    add_cxx_compile_options(-Wreorder)
-    # Obvious
-    add_cxx_compile_options(-Wshadow)
-    # Warn if left shifting a negative value
-    add_cxx_compile_options(-Wshift-negative-value)
-    # Warn about a definition of an unsized deallocation function
-    add_cxx_compile_options(-Wsized-deallocation)
-    # Warn when the sizeof operator is applied to a parameter that is declared as an array in a function definition
-    add_cxx_compile_options(-Wsizeof-array-argument)
-    # Warn for suspicious length parameters to certain string and memory built-in functions if the argument uses sizeof
-    add_cxx_compile_options(-Wsizeof-pointer-memaccess)
-    # Warn about overriding virtual functions that are not marked with the override keyword
-    add_cxx_compile_options(-Wsuggest-override)
-    # Warn whenever a switch statement has an index of boolean type and the case values are outside the range of a boolean type
-    add_cxx_compile_options(-Wswitch-bool)
-    # Warn if a self-comparison always evaluates to true or false
-    add_cxx_compile_options(-Wtautological-compare)
-    # Warn about trampolines generated for pointers to nested functions
-    add_cxx_compile_options(-Wtrampolines)
-    # Obvious
-    add_cxx_compile_options(-Wunused)
-    add_cxx_compile_options(-Wundef)
-    # Warn if vector operation is not implemented via SIMD capabilities of the architecture
-    add_cxx_compile_options(-Wvector-operation-performance)
-    # Warn when a literal 0 is used as null pointer constant.
-    add_cxx_compile_options(-Wzero-as-null-pointer-constant)
-
-    # The following warnings are generally useful but had to be disabled because of compiler bugs with older GCCs.
-    # XXX: We should try again on more recent GCCs (--> see CMake variable GCC_MINIMUM_VERSION).
-
-    # gcc10 stuck with this option while compiling GatherUtils code, anyway there are builds with clang that will warn
-    add_cxx_compile_options(-Wno-sequence-point)
-    # gcc10 false positive with this warning in MergeTreePartition.cpp
-    #     inlined from 'void writeHexByteLowercase(UInt8, void*)' at ../src/Common/hex.h:39:11,
-    #     inlined from 'DB::String DB::MergeTreePartition::getID(const DB::Block&) const' at ../src/Storages/MergeTree/MergeTreePartition.cpp:85:30:
-    # ../contrib/libc-headers/x86_64-linux-gnu/bits/string_fortified.h:34:33: error: writing 2 bytes into a region of size 0 [-Werror=stringop-overflow=]
-    #   34 | return __builtin___memcpy_chk (__dest, __src, __len, __bos0 (__dest));
-    # For some reason (bug in gcc?) macro 'GCC diagnostic ignored "-Wstringop-overflow"' doesn't help.
-    add_cxx_compile_options(-Wno-stringop-overflow)
-    # reinterpretAs.cpp:182:31: error: ‘void* memcpy(void*, const void*, size_t)’ copying an object of non-trivial type
-    # ‘using ToFieldType = using FieldType = using UUID = struct StrongTypedef<wide::integer<128, unsigned int>, DB::UUIDTag>’
-    # {aka ‘struct StrongTypedef<wide::integer<128, unsigned int>, DB::UUIDTag>’} from an array of ‘const char8_t’
-    add_cxx_compile_options(-Wno-error=class-memaccess)
-    # Maybe false positive...
-    # In file included from /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/memory:673,
-    # In function ‘void std::__1::__libcpp_operator_delete(_Args ...) [with _Args = {void*, long unsigned int}]’,
-    # inlined from ‘void std::__1::__do_deallocate_handle_size(void*, size_t, _Args ...) [with _Args = {}]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/new:271:34,
-    # inlined from ‘void std::__1::__libcpp_deallocate(void*, size_t, size_t)’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/new:285:41,
-    # inlined from ‘constexpr void std::__1::allocator<_Tp>::deallocate(_Tp*, size_t) [with _Tp = char]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/memory:849:39,
-    # inlined from ‘static constexpr void std::__1::allocator_traits<_Alloc>::deallocate(std::__1::allocator_traits<_Alloc>::allocator_type&, std::__1::allocator_traits<_Alloc>::pointer, std::__1::allocator_traits<_Alloc>::size_type) [with _Alloc = std::__1::allocator<char>]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/__memory/allocator_traits.h:476:24,
-    # inlined from ‘std::__1::basic_string<_CharT, _Traits, _Allocator>::~basic_string() [with _CharT = char; _Traits = std::__1::char_traits<char>; _Allocator = std::__1::allocator<char>]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/string:2219:35,
-    # inlined from ‘std::__1::basic_string<_CharT, _Traits, _Allocator>::~basic_string() [with _CharT = char; _Traits = std::__1::char_traits<char>; _Allocator = std::__1::allocator<char>]’ at /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/string:2213:1,
-    # inlined from ‘DB::JSONBuilder::JSONMap::Pair::~Pair()’ at /home/jakalletti/ClickHouse/ClickHouse/src/Common/JSONBuilder.h:90:12,
-    # inlined from ‘void DB::JSONBuilder::JSONMap::add(std::__1::string, DB::JSONBuilder::ItemPtr)’ at /home/jakalletti/ClickHouse/ClickHouse/src/Common/JSONBuilder.h:97:68,
-    # inlined from ‘virtual void DB::ExpressionStep::describeActions(DB::JSONBuilder::JSONMap&) const’ at /home/jakalletti/ClickHouse/ClickHouse/src/Processors/QueryPlan/ExpressionStep.cpp:102:12:
-    # /home/jakalletti/ClickHouse/ClickHouse/contrib/libcxx/include/new:247:20: error: ‘void operator delete(void*, size_t)’ called on a pointer to an unallocated object ‘7598543875853023301’ [-Werror=free-nonheap-object]
-    add_cxx_compile_options(-Wno-error=free-nonheap-object)
-    # AggregateFunctionAvg.h:203:100: error: ‘this’ pointer is null [-Werror=nonnull]
-    add_cxx_compile_options(-Wno-error=nonnull)
 endif ()
contrib/CMakeLists.txt (vendored, 2 changes)

@@ -9,8 +9,6 @@ if (WITH_COVERAGE)
     # disable coverage for contib files and build with optimisations
     if (COMPILER_CLANG)
         add_compile_options(-O3 -DNDEBUG -finline-functions -finline-hint-functions ${WITHOUT_COVERAGE_LIST})
-    else()
-        add_compile_options(-O3 -DNDEBUG -finline-functions ${WITHOUT_COVERAGE_LIST})
     endif()
 endif()
 
contrib/cctz (vendored submodule)

@@ -1 +1 @@
-Subproject commit 7c78edd52b4d65acc103c2f195818ffcabe6fe0d
+Subproject commit 5e05432420f9692418e2e12aff09859e420b14a2
contrib/grpc-cmake/CMakeLists.txt

@@ -1,9 +1,4 @@
-# disable grpc due to conflicts of abseil (required by grpc) dynamic annotations with libtsan.a
-if (SANITIZE STREQUAL "thread" AND COMPILER_GCC)
-    set(ENABLE_GRPC_DEFAULT OFF)
-else()
-    set(ENABLE_GRPC_DEFAULT ${ENABLE_LIBRARIES})
-endif()
+set(ENABLE_GRPC_DEFAULT ${ENABLE_LIBRARIES})
 option(ENABLE_GRPC "Use gRPC" ${ENABLE_GRPC_DEFAULT})
 
 if(NOT ENABLE_GRPC)
contrib/krb5 (vendored submodule)

@@ -1 +1 @@
-Subproject commit 9453aec0d50e5aff9b189051611b321b40935d02
+Subproject commit b56ce6ba690e1f320df1a64afa34980c3e462617

contrib/krb5-cmake/CMakeLists.txt

@@ -15,10 +15,6 @@ if(NOT AWK_PROGRAM)
     message(FATAL_ERROR "You need the awk program to build ClickHouse with krb5 enabled.")
 endif()
 
-if (NOT (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC))
-    add_compile_definitions(USE_BORINGSSL=1)
-endif ()
-
 set(KRB5_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/krb5/src")
 set(KRB5_ET_BIN_DIR "${CMAKE_CURRENT_BINARY_DIR}/include_private")
 

@@ -162,6 +158,11 @@ set(ALL_SRCS
 
     "${KRB5_SOURCE_DIR}/lib/crypto/builtin/kdf.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/builtin/cmac.c"
+    "${KRB5_SOURCE_DIR}/lib/crypto/builtin/des/des_keys.c"
+    "${KRB5_SOURCE_DIR}/lib/crypto/builtin/des/f_parity.c"
+    "${KRB5_SOURCE_DIR}/lib/crypto/builtin/enc_provider/rc4.c"
+    "${KRB5_SOURCE_DIR}/lib/crypto/builtin/hash_provider/hash_md4.c"
+    "${KRB5_SOURCE_DIR}/lib/crypto/builtin/md4/md4.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/krb/prng.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/krb/enc_dk_cmac.c"
     # "${KRB5_SOURCE_DIR}/lib/crypto/krb/crc32.c"

@@ -226,7 +227,6 @@ set(ALL_SRCS
     # "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/rc4.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/des3.c"
-    #"${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/camellia.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/openssl/cmac.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/openssl/sha256.c"
     "${KRB5_SOURCE_DIR}/lib/crypto/openssl/hmac.c"

@@ -474,6 +474,14 @@ set(ALL_SRCS
     "${KRB5_SOURCE_DIR}/lib/krb5/krb5_libinit.c"
 )
 
+if (NOT (ENABLE_OPENSSL OR ENABLE_OPENSSL_DYNAMIC))
+    add_compile_definitions(USE_BORINGSSL=1)
+else()
+    set(ALL_SRCS ${ALL_SRCS}
+        "${KRB5_SOURCE_DIR}/lib/crypto/openssl/enc_provider/camellia.c"
+    )
+endif()
+
 add_custom_command(
     OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/compile_et"
     COMMAND /bin/sh

@@ -673,6 +681,7 @@ target_include_directories(_krb5 PRIVATE
     "${KRB5_SOURCE_DIR}/lib/gssapi/krb5"
     "${KRB5_SOURCE_DIR}/lib/gssapi/spnego"
     "${KRB5_SOURCE_DIR}/util/et"
+    "${KRB5_SOURCE_DIR}/lib/crypto/builtin/md4"
     "${KRB5_SOURCE_DIR}/lib/crypto/openssl"
     "${KRB5_SOURCE_DIR}/lib/crypto/krb"
     "${KRB5_SOURCE_DIR}/util/profile"
contrib/libcxx-cmake/CMakeLists.txt

@@ -69,11 +69,6 @@ if (USE_MUSL)
     target_compile_definitions(cxx PUBLIC -D_LIBCPP_HAS_MUSL_LIBC=1)
 endif ()
 
-# Override the deduced attribute support that causes error.
-if (OS_DARWIN AND COMPILER_GCC)
-    add_compile_definitions(_LIBCPP_INIT_PRIORITY_MAX)
-endif ()
-
 target_compile_options(cxx PUBLIC $<$<COMPILE_LANGUAGE:CXX>:-nostdinc++>)
 
 # Third party library may have substandard code.

@@ -84,11 +79,6 @@ target_compile_definitions(cxx PUBLIC -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS
 
 target_link_libraries(cxx PUBLIC cxxabi)
 
-# For __udivmodti4, __divmodti4.
-if (OS_DARWIN AND COMPILER_GCC)
-    target_link_libraries(cxx PRIVATE gcc)
-endif ()
-
 install(
     TARGETS cxx
     EXPORT global
contrib/libhdfs3 (vendored submodule)

@@ -1 +1 @@
-Subproject commit 9ee3ce77215fca83b7fdfcfe2186a3db0d0bdb74
+Subproject commit 3c91d96ff29fe5928f055519c6d979c4b104db9e
contrib/libuv-cmake/CMakeLists.txt

@@ -1,9 +1,3 @@
-# once fixed, please remove similar places in CMakeLists of libuv users (search "ch_contrib::uv")
-if (OS_DARWIN AND COMPILER_GCC)
-    message (WARNING "libuv cannot be built with GCC in macOS due to a bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=93082")
-    return()
-endif()
-
 # This file is a modified version of contrib/libuv/CMakeLists.txt
 
 set (SOURCE_DIR "${CMAKE_SOURCE_DIR}/contrib/libuv")
contrib/sparse-checkout/setup-sparse-checkout.sh (new executable file, 19 lines)

@@ -0,0 +1,19 @@
+#!/bin/sh
+
+set -e
+
+git config submodule."contrib/llvm-project".update '!../sparse-checkout/update-llvm-project.sh'
+git config submodule."contrib/croaring".update '!../sparse-checkout/update-croaring.sh'
+git config submodule."contrib/aws".update '!../sparse-checkout/update-aws.sh'
+git config submodule."contrib/openssl".update '!../sparse-checkout/update-openssl.sh'
+git config submodule."contrib/boringssl".update '!../sparse-checkout/update-boringssl.sh'
+git config submodule."contrib/arrow".update '!../sparse-checkout/update-arrow.sh'
+git config submodule."contrib/grpc".update '!../sparse-checkout/update-grpc.sh'
+git config submodule."contrib/orc".update '!../sparse-checkout/update-orc.sh'
+git config submodule."contrib/h3".update '!../sparse-checkout/update-h3.sh'
+git config submodule."contrib/icu".update '!../sparse-checkout/update-icu.sh'
+git config submodule."contrib/boost".update '!../sparse-checkout/update-boost.sh'
+git config submodule."contrib/aws-s2n-tls".update '!../sparse-checkout/update-aws-s2n-tls.sh'
+git config submodule."contrib/protobuf".update '!../sparse-checkout/update-protobuf.sh'
+git config submodule."contrib/libxml2".update '!../sparse-checkout/update-libxml2.sh'
+git config submodule."contrib/brotli".update '!../sparse-checkout/update-brotli.sh'
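For a submodule whose update mode begins with '!', git runs the given custom command instead of a plain checkout, inside the submodule's working directory and with the commit to check out as the single argument. Roughly (a sketch; the placeholder sha1 stands for the commit pinned by the superproject):

    cd contrib/arrow                                    # git enters the submodule working tree,
    ../sparse-checkout/update-arrow.sh <pinned-sha1>    # then invokes the configured command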
contrib/sparse-checkout/update-arrow.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/sh
+
+echo "Using sparse checkout for arrow"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/cpp/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
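The pattern file uses gitignore-style sparse-checkout rules: '/*' keeps everything at the top level, '!/*/*' then excludes every subdirectory, and '/cpp/*' re-includes just the tree the build needs. A sketch for inspecting the result inside a submodule after its update script has run:

    cat "$(git rev-parse --git-dir)/info/sparse-checkout"   # the active patterns
    git ls-files -t | grep -c '^S '                         # index entries skipped from the worktree
    find . -path ./.git -prune -o -type f -print | wc -l    # files actually on disk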
contrib/sparse-checkout/update-aws-s2n-tls.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
+#!/bin/sh
+
+echo "Using sparse checkout for aws-s2n-tls"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/test/*' >> $FILES_TO_CHECKOUT
+echo '!/docs/*' >> $FILES_TO_CHECKOUT
+echo '!/compliance/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-aws.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
+#!/bin/sh
+
+echo "Using sparse checkout for aws"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/aws-cpp-sdk-core/*' >> $FILES_TO_CHECKOUT
+echo '/aws-cpp-sdk-s3/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-boost.sh (new executable file, 85 lines)

@@ -0,0 +1,85 @@
+#!/bin/sh
+
+echo "Using sparse checkout for boost"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/boost/*' > $FILES_TO_CHECKOUT
+echo '!/boost/*/*' >> $FILES_TO_CHECKOUT
+echo '/boost/algorithm/*' >> $FILES_TO_CHECKOUT
+echo '/boost/any/*' >> $FILES_TO_CHECKOUT
+echo '/boost/atomic/*' >> $FILES_TO_CHECKOUT
+echo '/boost/assert/*' >> $FILES_TO_CHECKOUT
+echo '/boost/bind/*' >> $FILES_TO_CHECKOUT
+echo '/boost/concept/*' >> $FILES_TO_CHECKOUT
+echo '/boost/config/*' >> $FILES_TO_CHECKOUT
+echo '/boost/container/*' >> $FILES_TO_CHECKOUT
+echo '/boost/container_hash/*' >> $FILES_TO_CHECKOUT
+echo '/boost/context/*' >> $FILES_TO_CHECKOUT
+echo '/boost/convert/*' >> $FILES_TO_CHECKOUT
+echo '/boost/coroutine/*' >> $FILES_TO_CHECKOUT
+echo '/boost/core/*' >> $FILES_TO_CHECKOUT
+echo '/boost/detail/*' >> $FILES_TO_CHECKOUT
+echo '/boost/dynamic_bitset/*' >> $FILES_TO_CHECKOUT
+echo '/boost/exception/*' >> $FILES_TO_CHECKOUT
+echo '/boost/filesystem/*' >> $FILES_TO_CHECKOUT
+echo '/boost/functional/*' >> $FILES_TO_CHECKOUT
+echo '/boost/function/*' >> $FILES_TO_CHECKOUT
+echo '/boost/geometry/*' >> $FILES_TO_CHECKOUT
+echo '/boost/graph/*' >> $FILES_TO_CHECKOUT
+echo '/boost/heap/*' >> $FILES_TO_CHECKOUT
+echo '/boost/integer/*' >> $FILES_TO_CHECKOUT
+echo '/boost/intrusive/*' >> $FILES_TO_CHECKOUT
+echo '/boost/iostreams/*' >> $FILES_TO_CHECKOUT
+echo '/boost/io/*' >> $FILES_TO_CHECKOUT
+echo '/boost/iterator/*' >> $FILES_TO_CHECKOUT
+echo '/boost/math/*' >> $FILES_TO_CHECKOUT
+echo '/boost/move/*' >> $FILES_TO_CHECKOUT
+echo '/boost/mpl/*' >> $FILES_TO_CHECKOUT
+echo '/boost/multi_index/*' >> $FILES_TO_CHECKOUT
+echo '/boost/multiprecision/*' >> $FILES_TO_CHECKOUT
+echo '/boost/numeric/*' >> $FILES_TO_CHECKOUT
+echo '/boost/predef/*' >> $FILES_TO_CHECKOUT
+echo '/boost/preprocessor/*' >> $FILES_TO_CHECKOUT
+echo '/boost/program_options/*' >> $FILES_TO_CHECKOUT
+echo '/boost/range/*' >> $FILES_TO_CHECKOUT
+echo '/boost/regex/*' >> $FILES_TO_CHECKOUT
+echo '/boost/smart_ptr/*' >> $FILES_TO_CHECKOUT
+echo '/boost/type_index/*' >> $FILES_TO_CHECKOUT
+echo '/boost/type_traits/*' >> $FILES_TO_CHECKOUT
+echo '/boost/system/*' >> $FILES_TO_CHECKOUT
+echo '/boost/tti/*' >> $FILES_TO_CHECKOUT
+echo '/boost/utility/*' >> $FILES_TO_CHECKOUT
+echo '/boost/lexical_cast/*' >> $FILES_TO_CHECKOUT
+echo '/boost/optional/*' >> $FILES_TO_CHECKOUT
+echo '/boost/property_map/*' >> $FILES_TO_CHECKOUT
+echo '/boost/pending/*' >> $FILES_TO_CHECKOUT
+echo '/boost/multi_array/*' >> $FILES_TO_CHECKOUT
+echo '/boost/tuple/*' >> $FILES_TO_CHECKOUT
+echo '/boost/icl/*' >> $FILES_TO_CHECKOUT
+echo '/boost/unordered/*' >> $FILES_TO_CHECKOUT
+echo '/boost/typeof/*' >> $FILES_TO_CHECKOUT
+echo '/boost/parameter/*' >> $FILES_TO_CHECKOUT
+echo '/boost/mp11/*' >> $FILES_TO_CHECKOUT
+echo '/boost/archive/*' >> $FILES_TO_CHECKOUT
+echo '/boost/function_types/*' >> $FILES_TO_CHECKOUT
+echo '/boost/serialization/*' >> $FILES_TO_CHECKOUT
+echo '/boost/fusion/*' >> $FILES_TO_CHECKOUT
+echo '/boost/variant/*' >> $FILES_TO_CHECKOUT
+echo '/boost/format/*' >> $FILES_TO_CHECKOUT
+echo '/boost/locale/*' >> $FILES_TO_CHECKOUT
+echo '/boost/random/*' >> $FILES_TO_CHECKOUT
+echo '/boost/spirit/*' >> $FILES_TO_CHECKOUT
+echo '/boost/uuid/*' >> $FILES_TO_CHECKOUT
+echo '/boost/xpressive/*' >> $FILES_TO_CHECKOUT
+echo '/boost/asio/*' >> $FILES_TO_CHECKOUT
+echo '/boost/circular_buffer/*' >> $FILES_TO_CHECKOUT
+echo '/boost/proto/*' >> $FILES_TO_CHECKOUT
+echo '/boost/qvm/*' >> $FILES_TO_CHECKOUT
+echo '/boost/property_tree/*' >> $FILES_TO_CHECKOUT
+echo '/libs/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
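Note that the '/boost/*' line uses a single '>' rather than '>>', truncating the pattern file a second time, so the initial '/*' and '!/*/*' entries are discarded and only the patterns from '/boost/*' onward take effect.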
contrib/sparse-checkout/update-boringssl.sh (new executable file, 14 lines)

@@ -0,0 +1,14 @@
+#!/bin/sh
+
+echo "Using sparse checkout for boringsll"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/fuzz/*' >> $FILES_TO_CHECKOUT
+echo '!/crypto/cipher_extra/test/*' >> $FILES_TO_CHECKOUT
+echo '!/third_party/wycheproof_testvectors/*' >> $FILES_TO_CHECKOUT
+echo '!/third_party/googletest/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-brotli.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/sh
+
+echo "Using sparse checkout for brotli"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/c/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-croaring.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/sh
+
+echo "Using sparse checkout for croaring"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/benchmarks/*' >> $FILES_TO_CHECKOUT
+echo '!/tests/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-grpc.sh (new executable file, 22 lines)

@@ -0,0 +1,22 @@
+#!/bin/sh
+
+echo "Using sparse checkout for grpc"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/test/*' >> $FILES_TO_CHECKOUT
+echo '/test/build/*' >> $FILES_TO_CHECKOUT
+echo '!/tools/*' >> $FILES_TO_CHECKOUT
+echo '/tools/codegen/*' >> $FILES_TO_CHECKOUT
+echo '!/examples/*' >> $FILES_TO_CHECKOUT
+echo '!/doc/*' >> $FILES_TO_CHECKOUT
+# FIXME why do we need csharp?
+#echo '!/src/csharp/*' >> $FILES_TO_CHECKOUT
+echo '!/src/python/*' >> $FILES_TO_CHECKOUT
+echo '!/src/objective-c/*' >> $FILES_TO_CHECKOUT
+echo '!/src/php/*' >> $FILES_TO_CHECKOUT
+echo '!/src/ruby/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-h3.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/sh
+
+echo "Using sparse checkout for h3"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/tests/*' >> $FILES_TO_CHECKOUT
+echo '!/website/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-icu.sh (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+#!/bin/sh
+
+echo "Using sparse checkout for icu"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/icu4c/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-libxml2.sh (new executable file, 16 lines)

@@ -0,0 +1,16 @@
+#!/bin/sh
+
+echo "Using sparse checkout for libxml2"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/result/*' >> $FILES_TO_CHECKOUT
+echo '!/test/*' >> $FILES_TO_CHECKOUT
+echo '!/doc/*' >> $FILES_TO_CHECKOUT
+echo '!/os400/*' >> $FILES_TO_CHECKOUT
+echo '!/fuzz/*' >> $FILES_TO_CHECKOUT
+echo '!/python/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-llvm-project.sh (new executable file, 27 lines)

@@ -0,0 +1,27 @@
+#!/bin/sh
+
+echo "Using sparse checkout for llvm-project"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/llvm/*' >> $FILES_TO_CHECKOUT
+echo '!/llvm/*/*' >> $FILES_TO_CHECKOUT
+echo '/llvm/cmake/*' >> $FILES_TO_CHECKOUT
+echo '/llvm/projects/*' >> $FILES_TO_CHECKOUT
+echo '/llvm/include/*' >> $FILES_TO_CHECKOUT
+echo '/llvm/lib/*' >> $FILES_TO_CHECKOUT
+echo '/llvm/utils/TableGen/*' >> $FILES_TO_CHECKOUT
+echo '/libcxxabi/*' >> $FILES_TO_CHECKOUT
+echo '!/libcxxabi/test/*' >> $FILES_TO_CHECKOUT
+echo '/libcxx/*' >> $FILES_TO_CHECKOUT
+echo '!/libcxx/test/*' >> $FILES_TO_CHECKOUT
+echo '/libunwind/*' >> $FILES_TO_CHECKOUT
+echo '!/libunwind/test/*' >> $FILES_TO_CHECKOUT
+echo '/compiler-rt/*' >> $FILES_TO_CHECKOUT
+echo '!/compiler-rt/test/*' >> $FILES_TO_CHECKOUT
+echo '/cmake/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-openssl.sh (new executable file, 15 lines)

@@ -0,0 +1,15 @@
+#!/bin/sh
+
+echo "Using sparse checkout for openssl"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/fuzz/*' >> $FILES_TO_CHECKOUT
+echo '!/test/*' >> $FILES_TO_CHECKOUT
+echo '!/doc/*' >> $FILES_TO_CHECKOUT
+echo '!/providers/*' >> $FILES_TO_CHECKOUT
+echo '!/apps/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-orc.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
+#!/bin/sh
+
+echo "Using sparse checkout for orc"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '/*' > $FILES_TO_CHECKOUT
+echo '!/*/*' >> $FILES_TO_CHECKOUT
+echo '/c++/*' >> $FILES_TO_CHECKOUT
+echo '/proto/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/sparse-checkout/update-protobuf.sh (new executable file, 13 lines)

@@ -0,0 +1,13 @@
+#!/bin/sh
+
+echo "Using sparse checkout for protobuf"
+
+FILES_TO_CHECKOUT=$(git rev-parse --git-dir)/info/sparse-checkout
+echo '!/*' > $FILES_TO_CHECKOUT
+echo '/*/*' >> $FILES_TO_CHECKOUT
+echo '/src/*' >> $FILES_TO_CHECKOUT
+echo '/cmake/*' >> $FILES_TO_CHECKOUT
+
+git config core.sparsecheckout true
+git checkout $1
+git read-tree -mu HEAD
contrib/update-submodules.sh (vendored, new executable file, 11 lines)

@@ -0,0 +1,11 @@
+#!/bin/sh
+
+set -e
+
+WORKDIR=$(dirname "$0")
+WORKDIR=$(readlink -f "${WORKDIR}")
+
+"$WORKDIR/sparse-checkout/setup-sparse-checkout.sh"
+git submodule init
+git submodule sync
+git submodule update --depth=1
@ -18,13 +18,13 @@ RUN apt-get update \
# and MEMORY_LIMIT_EXCEEDED exceptions in Functional tests (total memory limit in Functional tests is ~55.24 GiB).
# TSAN will flush shadow memory when reaching this limit.
# It may cause false-negatives, but it's better than OOM.
RUN echo "TSAN_OPTIONS='verbosity=1000 halt_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'" >> /etc/environment
RUN echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
RUN echo "MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'" >> /etc/environment
RUN echo "LSAN_OPTIONS='suppressions=/usr/share/clickhouse-test/config/lsan_suppressions.txt'" >> /etc/environment
# Sanitizer options for current shell (not current, but the one that will be spawned on "docker run")
# (but w/o verbosity for TSAN, otherwise test.reference will not match)
ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
ENV UBSAN_OPTIONS='print_stacktrace=1'
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'
@ -114,6 +114,12 @@ RUN set -x \
&& echo 'dockremap:165536:65536' >> /etc/subuid \
&& echo 'dockremap:165536:65536' >> /etc/subgid

# Same options as in test/base/Dockerfile
# (in case you need to override them in tests)
ENV TSAN_OPTIONS='halt_on_error=1 history_size=7 memory_limit_mb=46080 second_deadlock_stack=1'
ENV UBSAN_OPTIONS='print_stacktrace=1'
ENV MSAN_OPTIONS='abort_on_error=1 poison_in_dtor=1'

EXPOSE 2375
ENTRYPOINT ["dockerd-entrypoint.sh"]
CMD ["sh", "-c", "pytest $PYTEST_OPTS"]
@ -13,7 +13,7 @@ You can install pre-built ClickHouse as described in [Quick Start](https://click
The build works on x86_64 (Intel) and arm64 (Apple Silicon) based on macOS 10.15 (Catalina) or higher with Homebrew's vanilla Clang.

:::note
It is also possible to compile with Apple's Xcode `apple-clang`, but it's strongly discouraged.
:::

## Install Homebrew {#install-homebrew}
@ -75,20 +75,6 @@ cmake --open .
# The resulting binary will be created at: ./programs/Debug/clickhouse
```

## Caveats {#caveats}

If you intend to run `clickhouse-server`, make sure to increase the system’s `maxfiles` variable.
@ -39,9 +39,15 @@ Next, you need to download the source files onto your working machine. This is c

In the command line terminal run:

    git clone --shallow-submodules git@github.com:your_github_username/ClickHouse.git
    cd ClickHouse

Or (if you'd like to use sparse checkout for submodules and avoid checking out unneeded files):

    git clone git@github.com:your_github_username/ClickHouse.git
    cd ClickHouse
    ./contrib/update-submodules.sh

Note: please, substitute *your_github_username* with what is appropriate!

This command will create a directory `ClickHouse` containing the working copy of the project.
@ -140,3 +140,4 @@ DESCRIBE TABLE test_database.test_table;
## Related content

- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)
- Blog: [ClickHouse and PostgreSQL - a Match Made in Data Heaven - part 2](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres-part-2)
@ -177,4 +177,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
- [Using PostgreSQL as a dictionary source](../../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-postgresql)

## Related content

- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)
- Blog: [ClickHouse and PostgreSQL - a Match Made in Data Heaven - part 2](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres-part-2)
@ -122,3 +122,7 @@ FROM test.mv_visits
GROUP BY StartDate
ORDER BY StartDate;
```

## Related Content

- Blog: [Using Aggregate Combinators in ClickHouse](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states)
@ -191,3 +191,7 @@ is performance. In practice, users often search for multiple terms at once. For
'%big%'` can be evaluated directly using an inverted index by forming the union of the row id lists for terms "little" and "big". This also
means that the parameter `GRANULARITY` supplied to index creation has no meaning (it may be removed from the syntax in the future).
:::
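As a concrete sketch (a hypothetical table `docs` with an inverted index on its `str` column), a multi-term search of the kind described above looks like this:

```sql
-- Both LIKE terms are answered from the inverted index; the engine forms
-- the union of the row id lists for the terms 'little' and 'big'.
SELECT count()
FROM docs
WHERE str LIKE '%little%' OR str LIKE '%big%';
```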

## Related Content

- Blog: [Introducing Inverted Indices in ClickHouse](https://clickhouse.com/blog/clickhouse-search-with-inverted-indices)
@ -8,11 +8,18 @@ sidebar_label: Data Replication

:::note
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:

```sql
ENGINE = ReplicatedReplacingMergeTree(
    '/clickhouse/tables/{shard}/table_name',
    '{replica}',
    ver
)
```

with:

```sql
ENGINE = ReplicatedReplacingMergeTree
```
:::
@ -186,3 +186,7 @@ ARRAY JOIN
When requesting data, use the [sumMap(key, value)](../../../sql-reference/aggregate-functions/reference/summap.md) function for aggregation of `Map`.

For nested data structure, you do not need to specify its columns in the tuple of columns for summation.
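As a brief sketch of `sumMap` over ad-hoc arrays (the column names `k` and `v` are illustrative), values are summed per matching key:

```sql
SELECT sumMap(k, v)
FROM
(
    SELECT ['a', 'b'] AS k, [1, 2] AS v
    UNION ALL
    SELECT ['a', 'c'], [10, 20]
);
-- should return (['a','b','c'],[11,2,20]): keys sorted, values summed per key
```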

## Related Content

- Blog: [Using Aggregate Combinators in ClickHouse](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states)
@ -112,3 +112,7 @@ If setting `keeper_map_strict_mode` is set to `true`, fetching and updating data
```sql
ALTER TABLE keeper_map_table UPDATE v1 = v1 * 10 + 2 WHERE key LIKE 'some%' AND v3 > 3.1;
```

## Related content

- Blog: [Building a Real-time Analytics Apps with ClickHouse and Hex](https://clickhouse.com/blog/building-real-time-applications-with-clickhouse-and-hex-notebook-keeper-engine)
@ -2499,7 +2499,9 @@ LIMIT 20
We welcome exact and improved solutions here.

## Related Content

- Blog: [Git commits and our community](https://clickhouse.com/blog/clickhouse-git-community-commits)
- Blog: [Window and array functions for Git commit sequences](https://clickhouse.com/blog/clickhouse-window-array-functions-git-commits)
- Blog: [Building a Real-time Analytics Apps with ClickHouse and Hex](https://clickhouse.com/blog/building-real-time-applications-with-clickhouse-and-hex-notebook-keeper-engine)
- Blog: [A Story of Open-source GitHub Activity using ClickHouse + Grafana](https://clickhouse.com/blog/introduction-to-clickhouse-and-grafana-webinar)
@ -78,7 +78,7 @@ The supported formats are:
| [Null](#null)                 | ✗ | ✔ |
| [XML](#xml)                   | ✗ | ✔ |
| [CapnProto](#capnproto)       | ✔ | ✔ |
| [LineAsString](#lineasstring) | ✔ | ✔ |
| [Regexp](#data-format-regexp) | ✔ | ✗ |
| [RawBLOB](#rawblob)           | ✔ | ✔ |
| [MsgPack](#msgpack)           | ✔ | ✔ |
@ -1877,6 +1877,13 @@ Column names must:

Output Avro file compression and sync interval can be configured with [output_format_avro_codec](/docs/en/operations/settings/settings-formats.md/#output_format_avro_codec) and [output_format_avro_sync_interval](/docs/en/operations/settings/settings-formats.md/#output_format_avro_sync_interval) respectively.

### Example Data {#example-data-avro}

Using the ClickHouse [DESCRIBE](/docs/en/sql-reference/statements/describe-table) statement, you can quickly view the inferred format of an Avro file like the following example. This example includes the URL of a publicly accessible Avro file in the ClickHouse S3 public bucket:

```
DESCRIBE url('https://clickhouse-public-datasets.s3.eu-central-1.amazonaws.com/hits.avro','Avro');
```

## AvroConfluent {#data-format-avro-confluent}

AvroConfluent supports decoding single-object Avro messages commonly used with [Kafka](https://kafka.apache.org/) and [Confluent Schema Registry](https://docs.confluent.io/current/schema-registry/index.html).
@ -1936,30 +1943,31 @@ Setting `format_avro_schema_registry_url` needs to be configured in `users.xml`

The table below shows supported data types and how they match ClickHouse [data types](/docs/en/sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.

| Parquet data type (`INSERT`) | ClickHouse data type | Parquet data type (`SELECT`) |
|------------------------------|----------------------|------------------------------|
| `BOOL` | [Bool](/docs/en/sql-reference/data-types/boolean.md) | `BOOL` |
| `UINT8`, `BOOL` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](/docs/en/sql-reference/data-types/int-uint.md)/[Enum8](/docs/en/sql-reference/data-types/enum.md) | `INT8` |
| `UINT16` | [UInt16](/docs/en/sql-reference/data-types/int-uint.md) | `UINT16` |
| `INT16` | [Int16](/docs/en/sql-reference/data-types/int-uint.md)/[Enum16](/docs/en/sql-reference/data-types/enum.md) | `INT16` |
| `UINT32` | [UInt32](/docs/en/sql-reference/data-types/int-uint.md) | `UINT32` |
| `INT32` | [Int32](/docs/en/sql-reference/data-types/int-uint.md) | `INT32` |
| `UINT64` | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](/docs/en/sql-reference/data-types/int-uint.md) | `INT64` |
| `FLOAT` | [Float32](/docs/en/sql-reference/data-types/float.md) | `FLOAT` |
| `DOUBLE` | [Float64](/docs/en/sql-reference/data-types/float.md) | `DOUBLE` |
| `DATE` | [Date32](/docs/en/sql-reference/data-types/date.md) | `DATE` |
| `TIME (ms)` | [DateTime](/docs/en/sql-reference/data-types/datetime.md) | `UINT32` |
| `TIMESTAMP`, `TIME (us, ns)` | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) | `TIMESTAMP` |
| `STRING`, `BINARY` | [String](/docs/en/sql-reference/data-types/string.md) | `BINARY` |
| `STRING`, `BINARY`, `FIXED_LENGTH_BYTE_ARRAY` | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `FIXED_LENGTH_BYTE_ARRAY` |
| `DECIMAL` | [Decimal](/docs/en/sql-reference/data-types/decimal.md) | `DECIMAL` |
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_LENGTH_BYTE_ARRAY` |
| `FIXED_LENGTH_BYTE_ARRAY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_LENGTH_BYTE_ARRAY` |

Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
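For illustration, reading a Parquet file applies the mapping above during schema inference (a sketch; the file name `data.parquet` is hypothetical):

```sql
-- Column types are inferred from the Parquet schema per the table above.
SELECT * FROM file('data.parquet', Parquet) LIMIT 10;
```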
@ -2005,31 +2013,32 @@ To exchange data with Hadoop, you can use [HDFS table engine](/docs/en/engines/t

The table below shows supported data types and how they match ClickHouse [data types](/docs/en/sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.

| Arrow data type (`INSERT`) | ClickHouse data type | Arrow data type (`SELECT`) |
|----------------------------|----------------------|----------------------------|
| `BOOL` | [Bool](/docs/en/sql-reference/data-types/boolean.md) | `BOOL` |
| `UINT8`, `BOOL` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `UINT8` |
| `INT8` | [Int8](/docs/en/sql-reference/data-types/int-uint.md)/[Enum8](/docs/en/sql-reference/data-types/enum.md) | `INT8` |
| `UINT16` | [UInt16](/docs/en/sql-reference/data-types/int-uint.md) | `UINT16` |
| `INT16` | [Int16](/docs/en/sql-reference/data-types/int-uint.md)/[Enum16](/docs/en/sql-reference/data-types/enum.md) | `INT16` |
| `UINT32` | [UInt32](/docs/en/sql-reference/data-types/int-uint.md) | `UINT32` |
| `INT32` | [Int32](/docs/en/sql-reference/data-types/int-uint.md) | `INT32` |
| `UINT64` | [UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `UINT64` |
| `INT64` | [Int64](/docs/en/sql-reference/data-types/int-uint.md) | `INT64` |
| `FLOAT`, `HALF_FLOAT` | [Float32](/docs/en/sql-reference/data-types/float.md) | `FLOAT32` |
| `DOUBLE` | [Float64](/docs/en/sql-reference/data-types/float.md) | `FLOAT64` |
| `DATE32` | [Date32](/docs/en/sql-reference/data-types/date32.md) | `UINT16` |
| `DATE64` | [DateTime](/docs/en/sql-reference/data-types/datetime.md) | `UINT32` |
| `TIMESTAMP`, `TIME32`, `TIME64` | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) | `UINT32` |
| `STRING`, `BINARY` | [String](/docs/en/sql-reference/data-types/string.md) | `BINARY` |
| `STRING`, `BINARY`, `FIXED_SIZE_BINARY` | [FixedString](/docs/en/sql-reference/data-types/fixedstring.md) | `FIXED_SIZE_BINARY` |
| `DECIMAL` | [Decimal](/docs/en/sql-reference/data-types/decimal.md) | `DECIMAL` |
| `DECIMAL256` | [Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `DECIMAL256` |
| `LIST` | [Array](/docs/en/sql-reference/data-types/array.md) | `LIST` |
| `STRUCT` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `STRUCT` |
| `MAP` | [Map](/docs/en/sql-reference/data-types/map.md) | `MAP` |
| `UINT32` | [IPv4](/docs/en/sql-reference/data-types/domains/ipv4.md) | `UINT32` |
| `FIXED_SIZE_BINARY`, `BINARY` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `FIXED_SIZE_BINARY` |
| `FIXED_SIZE_BINARY`, `BINARY` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `FIXED_SIZE_BINARY` |

Arrays can be nested and can have a value of the `Nullable` type as an argument. `Tuple` and `Map` types also can be nested.
@ -2078,23 +2087,26 @@ $ clickhouse-client --query="SELECT * FROM {some_table} FORMAT Arrow" > {filenam

The table below shows supported data types and how they match ClickHouse [data types](/docs/en/sql-reference/data-types/index.md) in `INSERT` and `SELECT` queries.

| ORC data type (`INSERT`) | ClickHouse data type | ORC data type (`SELECT`) |
|--------------------------|----------------------|--------------------------|
| `Boolean` | [UInt8](/docs/en/sql-reference/data-types/int-uint.md) | `Boolean` |
| `Tinyint` | [Int8/UInt8](/docs/en/sql-reference/data-types/int-uint.md)/[Enum8](/docs/en/sql-reference/data-types/enum.md) | `Tinyint` |
| `Smallint` | [Int16/UInt16](/docs/en/sql-reference/data-types/int-uint.md)/[Enum16](/docs/en/sql-reference/data-types/enum.md) | `Smallint` |
| `Int` | [Int32/UInt32](/docs/en/sql-reference/data-types/int-uint.md) | `Int` |
| `Bigint` | [Int64/UInt64](/docs/en/sql-reference/data-types/int-uint.md) | `Bigint` |
| `Float` | [Float32](/docs/en/sql-reference/data-types/float.md) | `Float` |
| `Double` | [Float64](/docs/en/sql-reference/data-types/float.md) | `Double` |
| `Decimal` | [Decimal](/docs/en/sql-reference/data-types/decimal.md) | `Decimal` |
| `Date` | [Date32](/docs/en/sql-reference/data-types/date32.md) | `Date` |
| `Timestamp` | [DateTime64](/docs/en/sql-reference/data-types/datetime64.md) | `Timestamp` |
| `String`, `Char`, `Varchar`, `Binary` | [String](/docs/en/sql-reference/data-types/string.md) | `Binary` |
| `List` | [Array](/docs/en/sql-reference/data-types/array.md) | `List` |
| `Struct` | [Tuple](/docs/en/sql-reference/data-types/tuple.md) | `Struct` |
| `Map` | [Map](/docs/en/sql-reference/data-types/map.md) | `Map` |
| `Int` | [IPv4](/docs/en/sql-reference/data-types/int-uint.md) | `Int` |
| `Binary` | [IPv6](/docs/en/sql-reference/data-types/domains/ipv6.md) | `Binary` |
| `Binary` | [Int128/UInt128/Int256/UInt256](/docs/en/sql-reference/data-types/int-uint.md) | `Binary` |
| `Binary` | [Decimal256](/docs/en/sql-reference/data-types/decimal.md) | `Binary` |

Other types are not supported.
@ -61,3 +61,7 @@ FROM system.opentelemetry_span_log
```

In case of any errors, the part of the log data for which the error has occurred will be silently lost. Check the server log for error messages if the data does not arrive.

## Related Content

- Blog: [Building an Observability Solution with ClickHouse - Part 2 - Traces](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse)
@ -124,3 +124,7 @@ Finally, entries in the query cache are not shared between users due to security
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can
be marked accessible by other users (i.e. shared) by supplying setting
[query_cache_share_between_users](settings/settings.md#query-cache-share-between-users).
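A sketch of opting a cache entry into sharing (the table name `events` is a placeholder; both settings are applied per query):

```sql
-- Cache the result and mark the entry as readable by other users.
SELECT count() FROM events
SETTINGS use_query_cache = 1, query_cache_share_between_users = 1;
```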

## Related Content

- Blog: [Introducing the ClickHouse Query Cache](https://clickhouse.com/blog/introduction-to-the-clickhouse-query-cache-and-design)
@ -1045,7 +1045,7 @@ Default value: `0`.

## background_pool_size {#background_pool_size}

Sets the number of threads performing background merges and mutations for tables with MergeTree engines. For backward compatibility, this setting can also be applied at server startup from the `default` profile configuration. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. A smaller pool size utilizes less CPU and disk resources, but background processes advance more slowly, which might eventually impact query performance.

Before changing it, please also take a look at related MergeTree settings, such as [number_of_free_entries_in_pool_to_lower_max_size_of_merge](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-lower-max-size-of-merge) and [number_of_free_entries_in_pool_to_execute_mutation](../../operations/settings/merge-tree-settings.md#number-of-free-entries-in-pool-to-execute-mutation).
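Before tuning the pool size, it can help to look at the current background load (a sketch using the standard introspection tables):

```sql
-- Merges currently executing in the background pool.
SELECT database, table, elapsed, progress FROM system.merges;
-- Mutations that have not finished yet.
SELECT database, table, command FROM system.mutations WHERE NOT is_done;
```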
@ -1063,8 +1063,8 @@ Default value: 16.

## background_merges_mutations_concurrency_ratio {#background_merges_mutations_concurrency_ratio}

Sets a ratio between the number of threads and the number of background merges and mutations that can be executed concurrently. For example, if the ratio equals 2 and `background_pool_size` is set to 16, then ClickHouse can execute 32 background merges concurrently. This is possible because background operations can be suspended and postponed, which is needed to give small merges more execution priority. You can only increase this ratio at runtime. To lower it you have to restart the server.
As with the `background_pool_size` setting, `background_merges_mutations_concurrency_ratio` can be applied from the `default` profile for backward compatibility.

Possible values:
@ -1079,6 +1079,33 @@ Default value: 2.
<background_merges_mutations_concurrency_ratio>3</background_merges_mutations_concurrency_ratio>
```

## merges_mutations_memory_usage_soft_limit {#merges_mutations_memory_usage_soft_limit}

Sets the limit on how much RAM is allowed to be used for performing merge and mutation operations.
Zero means unlimited.
If ClickHouse reaches this limit, it won't schedule any new background merge or mutation operations but will continue to execute already scheduled tasks.

Possible values:

- Any positive integer.

**Example**

```xml
<merges_mutations_memory_usage_soft_limit>0</merges_mutations_memory_usage_soft_limit>
```

## merges_mutations_memory_usage_to_ram_ratio {#merges_mutations_memory_usage_to_ram_ratio}

The default `merges_mutations_memory_usage_soft_limit` value is calculated as `memory_amount * merges_mutations_memory_usage_to_ram_ratio`.

Default value: `0.5`.

**See also**

- [max_memory_usage](../../operations/settings/query-complexity.md#settings_max_memory_usage)
- [merges_mutations_memory_usage_soft_limit](#merges_mutations_memory_usage_soft_limit)

## background_merges_mutations_scheduling_policy {#background_merges_mutations_scheduling_policy}

Algorithm used to select the next merge or mutation to be executed by the background thread pool. The policy may be changed at runtime without a server restart.
@ -40,6 +40,39 @@ SETTINGS additional_table_filters = (('table_1', 'x != 2'))
└───┴──────┘
```

## additional_result_filter

An additional filter expression to apply to the result of a `SELECT` query.
This setting is not applied to any subquery.

Default value: `''`.

**Example**

``` sql
insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd');
```
```response
┌─x─┬─y────┐
│ 1 │ a    │
│ 2 │ bb   │
│ 3 │ ccc  │
│ 4 │ dddd │
└───┴──────┘
```
```sql
SELECT *
FROM table_1
SETTINGS additional_result_filter = 'x != 2'
```
```response
┌─x─┬─y────┐
│ 1 │ a    │
│ 3 │ ccc  │
│ 4 │ dddd │
└───┴──────┘
```

## allow_nondeterministic_mutations {#allow_nondeterministic_mutations}

User-level setting that allows mutations on replicated tables to make use of non-deterministic functions such as `dictGet`.
@ -50,6 +50,7 @@ last_queue_update: 2021-10-12 14:50:08
absolute_delay:              99
total_replicas:              5
active_replicas:             5
lost_part_count:             0
last_queue_update_exception:
zookeeper_exception:
replica_is_active:           {'r1':1,'r2':1}

@ -90,6 +91,7 @@ The next 4 columns have a non-zero value only where there is an active session w
- `absolute_delay` (`UInt64`) - How big a lag, in seconds, the current replica has.
- `total_replicas` (`UInt8`) - The total number of known replicas of this table.
- `active_replicas` (`UInt8`) - The number of replicas of this table that have a session in ClickHouse Keeper (i.e., the number of functioning replicas).
- `lost_part_count` (`UInt64`) - The number of data parts lost in the table by all replicas in total since table creation. The value is persisted in ClickHouse Keeper and can only increase.
- `last_queue_update_exception` (`String`) - When the queue contains broken entries. Especially important when ClickHouse breaks backward compatibility between versions and log entries written by newer versions aren't parseable by old versions.
- `zookeeper_exception` (`String`) - The last exception message received if an error happened when fetching the info from ClickHouse Keeper.
- `replica_is_active` ([Map(String, UInt8)](../../sql-reference/data-types/map.md)) — Map between replica name and is replica active.
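A sketch of reading the new column together with replica health (the table name `visits` is a placeholder):

```sql
SELECT database, table, total_replicas, active_replicas, lost_part_count
FROM system.replicas
WHERE table = 'visits';
```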
@ -11,8 +11,16 @@ Columns:
- `volume_name` ([String](../../sql-reference/data-types/string.md)) — Volume name defined in the storage policy.
- `volume_priority` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Volume order number in the configuration, the data fills the volumes according to this priority, i.e. data during inserts and merges is written to volumes with a lower priority (taking into account other rules: TTL, `max_data_part_size`, `move_factor`).
- `disks` ([Array(String)](../../sql-reference/data-types/array.md)) — Disk names, defined in the storage policy.
- `volume_type` ([Enum8](../../sql-reference/data-types/enum.md)) — Type of volume. Can have one of the following values:
    - `JBOD`
    - `SINGLE_DISK`
    - `UNKNOWN`
- `max_data_part_size` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Maximum size of a data part that can be stored on volume disks (0 — no limit).
- `move_factor` ([Float64](../../sql-reference/data-types/float.md)) — Ratio of free disk space. When the ratio exceeds the value of the configuration parameter, ClickHouse starts to move data to the next volume in order.
- `prefer_not_to_merge` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Value of the `prefer_not_to_merge` setting. When this setting is enabled, merging data on this volume is not allowed. This allows controlling how ClickHouse works with slow disks.
- `perform_ttl_move_on_insert` ([UInt8](../../sql-reference/data-types/int-uint.md)) — Value of the `perform_ttl_move_on_insert` setting. Disables TTL move on data part INSERT. By default, if we insert a data part that has already expired by the TTL move rule, it immediately goes to the volume/disk declared in the move rule. This can significantly slow down inserts when the destination volume/disk is slow (e.g. S3).
- `load_balancing` ([Enum8](../../sql-reference/data-types/enum.md)) — Policy for disk balancing. Can have one of the following values:
    - `ROUND_ROBIN`
    - `LEAST_USED`

If the storage policy contains more than one volume, then information for each volume is stored in an individual row of the table.
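A sketch of inspecting the new columns for all configured policies:

```sql
SELECT policy_name, volume_name, volume_type, disks, load_balancing
FROM system.storage_policies;
```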
@ -6,7 +6,17 @@ sidebar_label: clickhouse-local

# clickhouse-local

## Related Content

- Blog: [Extracting, Converting, and Querying Data in Local Files using clickhouse-local](https://clickhouse.com/blog/extracting-converting-querying-local-files-with-sql-clickhouse-local)

## When to use clickhouse-local vs. ClickHouse

`clickhouse-local` is an easy-to-use version of ClickHouse that is ideal for developers who need to perform fast processing on local and remote files using SQL without having to install a full database server. With `clickhouse-local`, developers can use SQL commands (using the [ClickHouse SQL dialect](../../sql-reference/index.md)) directly from the command line, providing a simple and efficient way to access ClickHouse features without the need for a full ClickHouse installation. One of the main benefits of `clickhouse-local` is that it is already included when installing [clickhouse-client](https://clickhouse.com/docs/en/integrations/sql-clients/clickhouse-client-local). This means that developers can get started with `clickhouse-local` quickly, without the need for a complex installation process.

While `clickhouse-local` is a great tool for development and testing purposes, and for processing files, it is not suitable for serving end users or applications. In these scenarios, it is recommended to use the open-source [ClickHouse](https://clickhouse.com/docs/en/install). ClickHouse is a powerful OLAP database that is designed to handle large-scale analytical workloads. It provides fast and efficient processing of complex queries on large datasets, making it ideal for use in production environments where high performance is critical. Additionally, ClickHouse offers a wide range of features such as replication, sharding, and high availability, which are essential for scaling up to handle large datasets and serving applications. If you need to handle larger datasets or serve end users or applications, we recommend using open-source ClickHouse instead of `clickhouse-local`.

Please read the docs below that show example use cases for `clickhouse-local`, such as [querying local CSVs](#query-data-in-a-csv-file-using-sql) or [reading a parquet file in S3](#query-data-in-a-parquet-file-in-aws-s3).

## Download clickhouse-local
@ -285,3 +285,8 @@ FROM people
│ [3,2]  │ [11.5,12.949999809265137] │
└────────┴───────────────────────────┘
```

## Related Content

- Blog: [Using Aggregate Combinators in ClickHouse](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states)
@ -6,7 +6,7 @@ title: deltaSumTimestamp

Adds the difference between consecutive rows. If the difference is negative, it is ignored.

This function is primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that store data ordered by some time bucket-aligned timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will all have the same timestamp, it is impossible for them to be merged in the correct order, without storing the original, unrounded timestamp value. The `deltaSumTimestamp` function keeps track of the original `timestamp` of the values it's seen, so the values (states) of the function are correctly computed during merging of parts.

To calculate the delta sum across an ordered collection you can simply use the [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) function.
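A minimal sketch of the function over synthetic data (the `value`/`timestamp` columns are illustrative; the second argument supplies the ordering used during merges):

```sql
SELECT deltaSumTimestamp(value, timestamp)
FROM
(
    SELECT number AS timestamp, [0, 4, 8, 3][number % 4 + 1] AS value
    FROM numbers(8)
);
```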
@ -63,3 +63,8 @@ SELECT uniqMerge(state) FROM (SELECT uniqState(UserID) AS state FROM table GROUP
## Usage Example

See [AggregatingMergeTree](../../engines/table-engines/mergetree-family/aggregatingmergetree.md) engine description.

## Related Content

- Blog: [Using Aggregate Combinators in ClickHouse](https://clickhouse.com/blog/aggregate-functions-combinators-in-clickhouse-for-arrays-maps-and-states)
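A self-contained sketch of the `-State`/`-Merge` pair over synthetic data (partial states are built per `number % 3` group and then merged):

```sql
SELECT uniqMerge(state)
FROM
(
    SELECT uniqState(number % 10) AS state
    FROM numbers(1000)
    GROUP BY number % 3
);
-- merging the three partial states should yield 10 distinct values
```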
@ -108,3 +108,8 @@ Result:

- [map()](../../sql-reference/functions/tuple-map-functions.md#function-map) function
- [CAST()](../../sql-reference/functions/type-conversion-functions.md#type_conversion_function-cast) function

## Related content

- Blog: [Building an Observability Solution with ClickHouse - Part 2 - Traces](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse)
@ -645,7 +645,7 @@ For an alternative to `date\_diff`, see function `age`.
date_diff('unit', startdate, enddate, [timezone])
```

Aliases: `dateDiff`, `DATE_DIFF`, `timestampDiff`, `timestamp_diff`, `TIMESTAMP_DIFF`.
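For instance (a sketch; the timestamps are arbitrary), the aliases behave identically to `date_diff` itself:

```sql
-- Both return 25 (hours between the two timestamps).
SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
SELECT timestampDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
```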

**Arguments**
@ -1264,7 +1264,7 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %d | day of the month, zero-padded (01-31) | 02 |
| %D | Short MM/DD/YY date, equivalent to %m/%d/%y | 01/02/18 |
| %e | day of the month, space-padded (1-31) | 2 |
| %f | fractional second, see 'Note 1' below | 1234560 |
| %F | short YYYY-MM-DD date, equivalent to %Y-%m-%d | 2018-01-02 |
| %g | two-digit year format, aligned to ISO 8601, abbreviated from four-digit notation | 18 |
| %G | four-digit year format for ISO week number, calculated from the week-based year [defined by the ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Week_dates) standard, normally useful only with %V | 2018 |
@ -1276,16 +1276,16 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %k | hour in 24h format (00-23) | 22 |
| %l | hour in 12h format (01-12) | 09 |
| %m | month as an integer number (01-12) | 01 |
| %M | full month name (January-December), see 'Note 2' below | January |
| %n | new-line character (‘’) | |
| %p | AM or PM designation | PM |
| %Q | Quarter (1-4) | 1 |
| %r | 12-hour HH:MM AM/PM time, equivalent to %H:%i %p | 10:30 PM |
| %R | 24-hour HH:MM time, equivalent to %H:%i | 22:33 |
| %s | second (00-59) | 44 |
| %S | second (00-59) | 44 |
| %t | horizontal-tab character (’) | |
| %T | ISO 8601 time format (HH:MM:SS), equivalent to %H:%i:%S | 22:33:44 |
| %u | ISO 8601 weekday as number with Monday as 1 (1-7) | 2 |
| %V | ISO 8601 week number (01-53) | 01 |
| %w | weekday as an integer number with Sunday as 0 (0-6) | 2 |
@ -1295,6 +1295,10 @@ Using replacement fields, you can define a pattern for the resulting string. “
| %z | Time offset from UTC as +HHMM or -HHMM | -0500 |
| %% | a % sign | % |

Note 1: In ClickHouse versions earlier than v23.4, `%f` prints a single zero (0) if the formatted value is a Date, Date32 or DateTime (which have no fractional seconds) or a DateTime64 with a precision of 0. The previous behavior can be restored using setting `formatdatetime_f_prints_single_zero = 1`.

Note 2: In ClickHouse versions earlier than v23.4, `%M` prints the minute (00-59) instead of the full month name (January-December). The previous behavior can be restored using setting `formatdatetime_parsedatetime_m_is_month_name = 0`.

**Example**

Query:
@ -441,11 +441,11 @@ SELECT farmHash64(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:0
|
|||||||
|
|
||||||
## javaHash
|
## javaHash
|
||||||
|
|
||||||
Calculates JavaHash from a [string](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452),
|
Calculates JavaHash from a [string](http://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/String.java#l1452),
|
||||||
[Byte](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Byte.java#l405),
|
[Byte](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Byte.java#l405),
|
||||||
[Short](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Short.java#l410),
|
[Short](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Short.java#l410),
|
||||||
[Integer](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Integer.java#l959),
|
[Integer](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Integer.java#l959),
|
||||||
[Long](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Long.java#l1060).
|
[Long](https://hg.openjdk.java.net/jdk8u/jdk8u/jdk/file/478a4add975b/src/share/classes/java/lang/Long.java#l1060).
|
||||||
This hash function is neither fast nor of good quality. The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result.
|
This hash function is neither fast nor of good quality. The only reason to use it is when this algorithm is already used in another system and you have to calculate exactly the same result.
|
||||||
|
|
||||||
Note that Java only supports calculating the hash of signed integers, so if you want to calculate the hash of unsigned integers you must cast them to the proper signed ClickHouse types.
|
Note that Java only supports calculating the hash of signed integers, so if you want to calculate the hash of unsigned integers you must cast them to the proper signed ClickHouse types.
|
||||||
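For instance (a minimal sketch), an unsigned value can be hashed by first casting it to the matching signed type:

```sql
-- cast UInt32 to Int32 before hashing to match Java's signed semantics
SELECT javaHash(toInt32(toUInt32(42)));
```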
@ -660,6 +660,45 @@ Result:
|
|||||||
└──────────────────────┴─────────────────────┘
|
└──────────────────────┴─────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## kafkaMurmurHash
|
||||||
|
|
||||||
|
Calculates a 32-bit [MurmurHash2](https://github.com/aappleby/smhasher) hash value using the same hash seed as [Kafka](https://github.com/apache/kafka/blob/461c5cfe056db0951d9b74f5adc45973670404d7/clients/src/main/java/org/apache/kafka/common/utils/Utils.java#L482) and with the highest bit dropped, for compatibility with the [Default Partitioner](https://github.com/apache/kafka/blob/139f7709bd3f5926901a21e55043388728ccca78/clients/src/main/java/org/apache/kafka/clients/producer/internals/BuiltInPartitioner.java#L328).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
kafkaMurmurHash(par1, ...)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `par1, ...` — A variable number of parameters that can be any of the [supported data types](/docs/en/sql-reference/data-types/index.md/#data_types).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- Calculated hash value.
|
||||||
|
|
||||||
|
Type: [UInt32](/docs/en/sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
kafkaMurmurHash('foobar') AS res1,
|
||||||
|
kafkaMurmurHash(array('e','x','a'), 'mple', 10, toDateTime('2019-06-15 23:00:00')) AS res2
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌───────res1─┬─────res2─┐
|
||||||
|
│ 1357151166 │ 85479775 │
|
||||||
|
└────────────┴──────────┘
|
||||||
|
```
|
||||||
|
|
||||||
## murmurHash3_32, murmurHash3_64
|
## murmurHash3_32, murmurHash3_64
|
||||||
|
|
||||||
Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value.
|
Produces a [MurmurHash3](https://github.com/aappleby/smhasher) hash value.
|
||||||
|
@ -194,7 +194,14 @@ Accepts a number. If the number is less than one, it returns 0. Otherwise, it ro
|
|||||||
|
|
||||||
## roundAge(num)
|
## roundAge(num)
|
||||||
|
|
||||||
Accepts a number. If the number is less than 18, it returns 0. Otherwise, it rounds the number down to a number from the set: 18, 25, 35, 45, 55.
|
Accepts a number (see the example after this list). If the number is
|
||||||
|
- smaller than 1, it returns 0,
|
||||||
|
- between 1 and 17, it returns 17,
|
||||||
|
- between 18 and 24, it returns 18,
|
||||||
|
- between 25 and 34, it returns 25,
|
||||||
|
- between 35 and 44, it returns 35,
|
||||||
|
- between 45 and 54, it returns 45,
|
||||||
|
- equal to or larger than 55, it returns 55.
|
||||||
|
|
||||||
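A minimal sketch of the bucketing described above:

```sql
SELECT arrayJoin([0, 5, 19, 30, 50, 60]) AS age, roundAge(age);
-- returns 0, 17, 18, 25, 45 and 55 respectively
```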
## roundDown(num, arr)
|
## roundDown(num, arr)
|
||||||
|
|
||||||
|
@ -6,7 +6,7 @@ sidebar_label: Strings
|
|||||||
|
|
||||||
# Functions for Working with Strings
|
# Functions for Working with Strings
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [replacing](../../sql-reference/functions/string-replace-functions.md) in strings are described separately.
|
Functions for [searching](../../sql-reference/functions/string-search-functions.md) and [replacing](../../sql-reference/functions/string-replace-functions.md) in strings are described separately.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
@ -1193,6 +1193,42 @@ Result:
|
|||||||
```
|
```
|
||||||
|
|
||||||
## concatWithSeparatorAssumeInjective
|
## concatWithSeparatorAssumeInjective
|
||||||
|
|
||||||
Same as concatWithSeparator, the difference is that you need to ensure that concatWithSeparator(sep, expr1, expr2, expr3...) → result is injective; this property is used for the optimization of GROUP BY.
|
Same as concatWithSeparator, the difference is that you need to ensure that concatWithSeparator(sep, expr1, expr2, expr3...) → result is injective; this property is used for the optimization of GROUP BY.
|
||||||
|
|
||||||
A function is called “injective” if it always returns a different result for different values of arguments. In other words: different arguments never yield an identical result.
|
A function is called “injective” if it always returns a different result for different values of arguments. In other words: different arguments never yield an identical result.
|
||||||
|
|
||||||
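As a quick sketch of why injectivity matters: plain concatenation can map different inputs to the same output, while a separator keeps them distinct:

```sql
SELECT concat('ab', 'c') = concat('a', 'bc');                                      -- 1 (collision)
SELECT concatWithSeparator('-', 'ab', 'c') = concatWithSeparator('-', 'a', 'bc');  -- 0
```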
|
## soundex
|
||||||
|
|
||||||
|
Returns the [Soundex code](https://en.wikipedia.org/wiki/Soundex) of a string.
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
soundex(val)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `val` - Input value. [String](../data-types/string.md)
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- The Soundex code of the input value. [String](../data-types/string.md)
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT soundex('aksel');
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
``` text
|
||||||
|
┌─soundex('aksel')─┐
|
||||||
|
│ A240 │
|
||||||
|
└──────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
@ -13,17 +13,18 @@ Functions for [searching](../../sql-reference/functions/string-search-functions.
|
|||||||
## replaceOne(haystack, pattern, replacement)
|
## replaceOne(haystack, pattern, replacement)
|
||||||
|
|
||||||
Replaces the first occurrence of the substring ‘pattern’ (if it exists) in ‘haystack’ by the ‘replacement’ string.
|
Replaces the first occurrence of the substring ‘pattern’ (if it exists) in ‘haystack’ by the ‘replacement’ string.
|
||||||
‘pattern’ and ‘replacement’ must be constants.
|
|
||||||
|
|
||||||
## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement)
|
## replaceAll(haystack, pattern, replacement), replace(haystack, pattern, replacement)
|
||||||
|
|
||||||
Replaces all occurrences of the substring ‘pattern’ in ‘haystack’ by the ‘replacement’ string.
|
Replaces all occurrences of the substring ‘pattern’ in ‘haystack’ by the ‘replacement’ string.
|
||||||
|
|
||||||
|
Alias: `replace`.
|
||||||
|
|
||||||
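A minimal sketch of the difference between the two functions:

```sql
SELECT replaceOne('aaa', 'a', 'b');  -- 'baa'
SELECT replaceAll('aaa', 'a', 'b');  -- 'bbb'
```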
## replaceRegexpOne(haystack, pattern, replacement)
|
## replaceRegexpOne(haystack, pattern, replacement)
|
||||||
|
|
||||||
Replaces the first occurrence of the substring matching the regular expression ‘pattern’ in ‘haystack’ by the ‘replacement’ string.
|
Replaces the first occurrence of the substring matching the regular expression ‘pattern’ in ‘haystack’ by the ‘replacement’ string.
|
||||||
‘pattern‘ must be a constant [re2 regular expression](https://github.com/google/re2/wiki/Syntax).
|
‘pattern‘ must be a [re2 regular expression](https://github.com/google/re2/wiki/Syntax).
|
||||||
‘replacement’ must be a plain constant string or a constant string containing substitutions `\0-\9`.
|
‘replacement’ must be a plain string or a string containing substitutions `\0-\9`.
|
||||||
Substitutions `\1-\9` correspond to the 1st to 9th capturing group (submatch), substitution `\0` corresponds to the entire match.
|
Substitutions `\1-\9` correspond to the 1st to 9th capturing group (submatch), substitution `\0` corresponds to the entire match.
|
||||||
To use a verbatim `\` character in the ‘pattern’ or ‘replacement’ string, escape it using `\`.
|
To use a verbatim `\` character in the ‘pattern’ or ‘replacement’ string, escape it using `\`.
|
||||||
Also keep in mind that string literals require extra escaping.
|
Also keep in mind that string literals require extra escaping.
|
||||||
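A small sketch of capturing-group substitution together with the extra string-literal escaping:

```sql
SELECT replaceRegexpOne('Hello, World', '(\\w+), (\\w+)', '\\2 \\1');  -- 'World Hello'
```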
@ -88,6 +89,8 @@ SELECT replaceRegexpAll('Hello, World!', '^', 'here: ') AS res
|
|||||||
└─────────────────────┘
|
└─────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Alias: `REGEXP_REPLACE`.
|
||||||
|
|
||||||
## regexpQuoteMeta(s)
|
## regexpQuoteMeta(s)
|
||||||
|
|
||||||
The function adds a backslash before some predefined characters in the string.
|
The function adds a backslash before some predefined characters in the string.
|
||||||
|
@ -1245,7 +1245,6 @@ Returns DateTime values parsed from input string according to a MySQL style form
|
|||||||
**Supported format specifiers**
|
**Supported format specifiers**
|
||||||
|
|
||||||
All format specifiers listed in [formatDateTime](/docs/en/sql-reference/functions/date-time-functions.md#date_time_functions-formatDateTime) except:
|
All format specifiers listed in [formatDateTime](/docs/en/sql-reference/functions/date-time-functions.md#date_time_functions-formatDateTime) except:
|
||||||
- %f: fractional second
|
|
||||||
- %Q: Quarter (1-4)
|
- %Q: Quarter (1-4)
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
@ -28,3 +28,7 @@ The synchronicity of the query processing is defined by the [mutations_sync](/do
|
|||||||
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
|
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
|
||||||
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
|
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
|
||||||
- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting
|
- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting
|
||||||
|
|
||||||
|
## Related content
|
||||||
|
|
||||||
|
- Blog: [Handling Updates and Deletes in ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse)
|
||||||
|
@ -61,3 +61,7 @@ For all `ALTER` queries, if `alter_sync = 2` and some replicas are not active fo
|
|||||||
:::
|
:::
|
||||||
|
|
||||||
For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
|
For `ALTER TABLE ... UPDATE|DELETE` queries the synchronicity is defined by the [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting.
|
||||||
|
|
||||||
|
## Related content
|
||||||
|
|
||||||
|
- Blog: [Handling Updates and Deletes in ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse)
|
||||||
|
@ -27,3 +27,8 @@ The synchronicity of the query processing is defined by the [mutations_sync](/do
|
|||||||
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
|
- [Mutations](/docs/en/sql-reference/statements/alter/index.md#mutations)
|
||||||
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
|
- [Synchronicity of ALTER Queries](/docs/en/sql-reference/statements/alter/index.md#synchronicity-of-alter-queries)
|
||||||
- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting
|
- [mutations_sync](/docs/en/operations/settings/settings.md/#mutations_sync) setting
|
||||||
|
|
||||||
|
|
||||||
|
## Related content
|
||||||
|
|
||||||
|
- Blog: [Handling Updates and Deletes in ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse)
|
||||||
|
@ -364,3 +364,4 @@ The window view is useful in the following scenarios:
|
|||||||
## Related Content
|
## Related Content
|
||||||
|
|
||||||
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
- Blog: [Working with time series data in ClickHouse](https://clickhouse.com/blog/working-with-time-series-data-and-functions-ClickHouse)
|
||||||
|
- Blog: [Building an Observability Solution with ClickHouse - Part 2 - Traces](https://clickhouse.com/blog/storing-traces-and-spans-open-telemetry-in-clickhouse)
|
||||||
|
@ -55,3 +55,7 @@ With the described implementation now we can see what can negatively affect 'DEL
|
|||||||
- Table having a very large number of data parts
|
- Table having a very large number of data parts
|
||||||
- Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file.
|
- Having a lot of data in Compact parts—in a Compact part, all columns are stored in one file.
|
||||||
|
|
||||||
|
|
||||||
|
## Related content
|
||||||
|
|
||||||
|
- Blog: [Handling Updates and Deletes in ClickHouse](https://clickhouse.com/blog/handling-updates-and-deletes-in-clickhouse)
|
||||||
|
@ -22,6 +22,10 @@ DROP DATABASE [IF EXISTS] db [ON CLUSTER cluster] [SYNC]
|
|||||||
|
|
||||||
Deletes the table.
|
Deletes the table.
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
Also see [UNDROP TABLE](/docs/en/sql-reference/statements/undrop.md)
|
||||||
|
:::
|
||||||
|
|
||||||
Syntax:
|
Syntax:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
|
@ -18,6 +18,10 @@ FROM <left_table>
|
|||||||
|
|
||||||
Expressions from the `ON` clause and columns from the `USING` clause are called “join keys”. Unless otherwise stated, a join produces a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from rows with matching “join keys”, which might produce results with many more rows than the source tables.
|
Expressions from the `ON` clause and columns from the `USING` clause are called “join keys”. Unless otherwise stated, a join produces a [Cartesian product](https://en.wikipedia.org/wiki/Cartesian_product) from rows with matching “join keys”, which might produce results with many more rows than the source tables.
|
||||||
|
|
||||||
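A minimal sketch of that row multiplication, using two inline subqueries in place of real tables:

```sql
-- 2 matching rows on each side yield 2 × 2 = 4 result rows
SELECT *
FROM (SELECT arrayJoin([1, 1]) AS k) AS t1
JOIN (SELECT arrayJoin([1, 1]) AS k) AS t2 USING (k);
```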
|
## Related Content
|
||||||
|
|
||||||
|
- Blog: [ClickHouse: A Blazingly Fast DBMS with Full SQL Join Support - Part 1](https://clickhouse.com/blog/clickhouse-fully-supports-joins)
|
||||||
|
|
||||||
## Supported Types of JOIN
|
## Supported Types of JOIN
|
||||||
|
|
||||||
All standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) types are supported:
|
All standard [SQL JOIN](https://en.wikipedia.org/wiki/Join_(SQL)) types are supported:
|
||||||
|
99
docs/en/sql-reference/statements/undrop.md
Normal file
99
docs/en/sql-reference/statements/undrop.md
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
---
|
||||||
|
slug: /en/sql-reference/statements/undrop
|
||||||
|
sidebar_label: UNDROP
|
||||||
|
---
|
||||||
|
|
||||||
|
# UNDROP TABLE
|
||||||
|
|
||||||
|
Cancels the dropping of the table.
|
||||||
|
|
||||||
|
Beginning with ClickHouse version 23.3, it is possible to UNDROP a table in an Atomic database
|
||||||
|
within `database_atomic_delay_before_drop_table_sec` (8 minutes by default) of issuing the DROP TABLE statement. Dropped tables are listed in
|
||||||
|
a system table called `system.dropped_tables`.
|
||||||
|
|
||||||
|
If you have a materialized view without a `TO` clause associated with the dropped table, then you will also have to UNDROP the inner table of that view.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
UNDROP TABLE is experimental. To use it, add this setting:
|
||||||
|
```sql
|
||||||
|
set allow_experimental_undrop_table_query = 1;
|
||||||
|
```
|
||||||
|
:::
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
Also see [DROP TABLE](/docs/en/sql-reference/statements/drop.md)
|
||||||
|
:::
|
||||||
|
|
||||||
|
Syntax:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
UNDROP TABLE [db.]name [UUID '<uuid>'] [ON CLUSTER cluster]
|
||||||
|
```
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
set allow_experimental_undrop_table_query = 1;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE undropMe
|
||||||
|
(
|
||||||
|
`id` UInt8
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY id
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP TABLE undropMe
|
||||||
|
```
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM system.dropped_tables
|
||||||
|
FORMAT Vertical
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
Row 1:
|
||||||
|
──────
|
||||||
|
index: 0
|
||||||
|
database: default
|
||||||
|
table: undropMe
|
||||||
|
uuid: aa696a1a-1d70-4e60-a841-4c80827706cc
|
||||||
|
engine: MergeTree
|
||||||
|
metadata_dropped_path: /var/lib/clickhouse/metadata_dropped/default.undropMe.aa696a1a-1d70-4e60-a841-4c80827706cc.sql
|
||||||
|
table_dropped_time: 2023-04-05 14:12:12
|
||||||
|
|
||||||
|
1 row in set. Elapsed: 0.001 sec.
|
||||||
|
```
|
||||||
|
```sql
|
||||||
|
UNDROP TABLE undropMe
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
Ok.
|
||||||
|
```
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM system.dropped_tables
|
||||||
|
FORMAT Vertical
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
Ok.
|
||||||
|
|
||||||
|
0 rows in set. Elapsed: 0.001 sec.
|
||||||
|
```
|
||||||
|
```sql
|
||||||
|
DESCRIBE TABLE undropMe
|
||||||
|
FORMAT Vertical
|
||||||
|
```
|
||||||
|
```response
|
||||||
|
Row 1:
|
||||||
|
──────
|
||||||
|
name: id
|
||||||
|
type: UInt8
|
||||||
|
default_type:
|
||||||
|
default_expression:
|
||||||
|
comment:
|
||||||
|
codec_expression:
|
||||||
|
ttl_expression:
|
||||||
|
```
|
@ -14,7 +14,7 @@ The `INSERT` query uses both parsers:
|
|||||||
INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
|
INSERT INTO t VALUES (1, 'Hello, world'), (2, 'abc'), (3, 'def')
|
||||||
```
|
```
|
||||||
|
|
||||||
The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input_format_values_interpret_expressions](../operations/settings/settings-formats.md#settings-input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#syntax-expressions).
|
The `INSERT INTO t VALUES` fragment is parsed by the full parser, and the data `(1, 'Hello, world'), (2, 'abc'), (3, 'def')` is parsed by the fast stream parser. You can also turn on the full parser for the data by using the [input_format_values_interpret_expressions](../operations/settings/settings-formats.md#input_format_values_interpret_expressions) setting. When `input_format_values_interpret_expressions = 1`, ClickHouse first tries to parse values with the fast stream parser. If it fails, ClickHouse tries to use the full parser for the data, treating it like an SQL [expression](#expressions).
|
||||||
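A quick sketch, assuming a hypothetical table `t` with a numeric and a string column:

```sql
SET input_format_values_interpret_expressions = 1;
-- the expressions fail the fast stream parser and fall back to the full SQL parser
INSERT INTO t VALUES (1 + 2, lower('HELLO'));
```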
|
|
||||||
Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
|
Data can have any format. When a query is received, the server calculates no more than [max_query_size](../operations/settings/settings.md#settings-max_query_size) bytes of the request in RAM (by default, 1 MB), and the rest is stream parsed.
|
||||||
This allows avoiding issues with large `INSERT` queries.
|
This allows avoiding issues with large `INSERT` queries.
|
||||||
@ -45,7 +45,7 @@ You can check whether a data type name is case-sensitive in the [system.data_typ
|
|||||||
|
|
||||||
In contrast to standard SQL, all other keywords (including functions names) are **case-sensitive**.
|
In contrast to standard SQL, all other keywords (including functions names) are **case-sensitive**.
|
||||||
|
|
||||||
Keywords are not reserved; they are treated as such only in the corresponding context. If you use [identifiers](#syntax-identifiers) with the same name as the keywords, enclose them in double quotes or backticks. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`.
|
Keywords are not reserved; they are treated as such only in the corresponding context. If you use [identifiers](#identifiers) with the same name as the keywords, enclose them in double quotes or backticks. For example, the query `SELECT "FROM" FROM table_name` is valid if the table `table_name` has a column with the name `"FROM"`.
|
||||||
|
|
||||||
## Identifiers
|
## Identifiers
|
||||||
|
|
||||||
@ -54,7 +54,7 @@ Identifiers are:
|
|||||||
- Cluster, database, table, partition, and column names.
|
- Cluster, database, table, partition, and column names.
|
||||||
- Functions.
|
- Functions.
|
||||||
- Data types.
|
- Data types.
|
||||||
- [Expression aliases](#syntax-expression_aliases).
|
- [Expression aliases](#expression_aliases).
|
||||||
|
|
||||||
Identifiers can be quoted or non-quoted. The latter is preferred.
|
Identifiers can be quoted or non-quoted. The latter is preferred.
|
||||||
|
|
||||||
@ -108,7 +108,7 @@ Depending on the data format (input or output), `NULL` may have a different repr
|
|||||||
|
|
||||||
There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation is also `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation.
|
There are many nuances to processing `NULL`. For example, if at least one of the arguments of a comparison operation is `NULL`, the result of this operation is also `NULL`. The same is true for multiplication, addition, and other operations. For more information, read the documentation for each operation.
|
||||||
|
|
||||||
In queries, you can check `NULL` using the [IS NULL](../sql-reference/operators/index.md#operator-is-null) and [IS NOT NULL](../sql-reference/operators/index.md) operators and the related functions `isNull` and `isNotNull`.
|
In queries, you can check `NULL` using the [IS NULL](../sql-reference/operators/index.md#is-null) and [IS NOT NULL](../sql-reference/operators/index.md#is-not-null) operators and the related functions `isNull` and `isNotNull`.
|
||||||
|
|
||||||
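A minimal sketch:

```sql
SELECT NULL IS NULL, NULL IS NOT NULL, isNull(1), isNotNull(1);  -- 1, 0, 0, 1
```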
### Heredoc
|
### Heredoc
|
||||||
|
|
||||||
@ -149,7 +149,7 @@ For example, the following SQL defines parameters named `a`, `b`, `c` and `d` -
|
|||||||
SET param_a = 13;
|
SET param_a = 13;
|
||||||
SET param_b = 'str';
|
SET param_b = 'str';
|
||||||
SET param_c = '2022-08-04 18:30:53';
|
SET param_c = '2022-08-04 18:30:53';
|
||||||
SET param_d = {'10': [11, 12], '13': [14, 15]}';
|
SET param_d = {'10': [11, 12], '13': [14, 15]};
|
||||||
|
|
||||||
SELECT
|
SELECT
|
||||||
{a: UInt32},
|
{a: UInt32},
|
||||||
@ -166,7 +166,7 @@ Result:
|
|||||||
|
|
||||||
If you are using `clickhouse-client`, the parameters are specified as `--param_name=value`. For example, the following parameter has the name `message` and it is retrieved as a `String`:
|
If you are using `clickhouse-client`, the parameters are specified as `--param_name=value`. For example, the following parameter has the name `message` and it is retrieved as a `String`:
|
||||||
|
|
||||||
```sql
|
```bash
|
||||||
clickhouse-client --param_message='hello' --query="SELECT {message: String}"
|
clickhouse-client --param_message='hello' --query="SELECT {message: String}"
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -190,7 +190,7 @@ Query parameters are not general text substitutions which can be used in arbitra
|
|||||||
## Functions
|
## Functions
|
||||||
|
|
||||||
Function calls are written like an identifier with a list of arguments (possibly empty) in round brackets. In contrast to standard SQL, the brackets are required, even for an empty argument list. Example: `now()`.
|
Function calls are written like an identifier with a list of arguments (possibly empty) in round brackets. In contrast to standard SQL, the brackets are required, even for an empty argument list. Example: `now()`.
|
||||||
There are regular and aggregate functions (see the section “Aggregate functions”). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions.
|
There are regular and aggregate functions (see the section [Aggregate functions](/docs/en/sql-reference/aggregate-functions/index.md)). Some aggregate functions can contain two lists of arguments in brackets. Example: `quantile (0.9) (x)`. These aggregate functions are called “parametric” functions, and the arguments in the first list are called “parameters”. The syntax of aggregate functions without parameters is the same as for regular functions.
|
||||||
|
|
||||||
## Operators
|
## Operators
|
||||||
|
|
||||||
@ -199,7 +199,7 @@ For example, the expression `1 + 2 * 3 + 4` is transformed to `plus(plus(1, mult
|
|||||||
|
|
||||||
## Data Types and Database Table Engines
|
## Data Types and Database Table Engines
|
||||||
|
|
||||||
Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an argument list in brackets. For more information, see the sections “Data types,” “Table engines,” and “CREATE”.
|
Data types and table engines in the `CREATE` query are written the same way as identifiers or functions. In other words, they may or may not contain an argument list in brackets. For more information, see the sections [Data types](/docs/en/sql-reference/data-types/index.md), [Table engines](/docs/en/engines/table-engines/index.md), and [CREATE](/docs/en/sql-reference/statements/create/index.md).
|
||||||
|
|
||||||
## Expression Aliases
|
## Expression Aliases
|
||||||
|
|
||||||
@ -211,17 +211,17 @@ expr AS alias
|
|||||||
|
|
||||||
- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause without using the `AS` keyword.
|
- `AS` — The keyword for defining aliases. You can define the alias for a table name or a column name in a `SELECT` clause without using the `AS` keyword.
|
||||||
|
|
||||||
For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`.
|
For example, `SELECT table_name_alias.column_name FROM table_name table_name_alias`.
|
||||||
|
|
||||||
In the [CAST](./functions/type-conversion-functions.md#type_conversion_function-cast) function, the `AS` keyword has another meaning. See the description of the function.
|
In the [CAST](./functions/type-conversion-functions.md#castx-t) function, the `AS` keyword has another meaning. See the description of the function.
|
||||||
|
|
||||||
- `expr` — Any expression supported by ClickHouse.
|
- `expr` — Any expression supported by ClickHouse.
|
||||||
|
|
||||||
For example, `SELECT column_name * 2 AS double FROM some_table`.
|
For example, `SELECT column_name * 2 AS double FROM some_table`.
|
||||||
|
|
||||||
- `alias` — Name for `expr`. Aliases should comply with the [identifiers](#syntax-identifiers) syntax.
|
- `alias` — Name for `expr`. Aliases should comply with the [identifiers](#identifiers) syntax.
|
||||||
|
|
||||||
For example, `SELECT "table t".column_name FROM table_name AS "table t"`.
|
For example, `SELECT "table t".column_name FROM table_name AS "table t"`.
|
||||||
|
|
||||||
### Notes on Usage
|
### Notes on Usage
|
||||||
|
|
||||||
@ -254,11 +254,11 @@ Received exception from server (version 18.14.17):
|
|||||||
Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query.
|
Code: 184. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Aggregate function sum(b) is found inside another aggregate function in query.
|
||||||
```
|
```
|
||||||
|
|
||||||
In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. You can change this default behavior by setting [prefer_column_name_to_alias](../operations/settings/settings.md#prefer_column_name_to_alias) to `1`.
|
In this example, we declared table `t` with column `b`. Then, when selecting data, we defined the `sum(b) AS b` alias. As aliases are global, ClickHouse substituted the literal `b` in the expression `argMax(a, b)` with the expression `sum(b)`. This substitution caused the exception. You can change this default behavior by setting [prefer_column_name_to_alias](../operations/settings/settings.md#prefer-column-name-to-alias) to `1`.
|
||||||
|
|
||||||
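A sketch of the fix for the failing query above:

```sql
SET prefer_column_name_to_alias = 1;
SELECT argMax(a, b), sum(b) AS b FROM t;  -- b inside argMax now refers to the column
```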
## Asterisk
|
## Asterisk
|
||||||
|
|
||||||
In a `SELECT` query, an asterisk can replace the expression. For more information, see the section “SELECT”.
|
In a `SELECT` query, an asterisk can replace the expression. For more information, see the section [SELECT](/docs/en/sql-reference/statements/select/index.md#asterisk).
|
||||||
|
|
||||||
## Expressions
|
## Expressions
|
||||||
|
|
||||||
|
@ -20,7 +20,7 @@ A key advantage between ordinary UDF functions and the `executable` table functi
|
|||||||
The `executable` table function requires three parameters and accepts an optional list of input queries:
|
The `executable` table function requires three parameters and accepts an optional list of input queries:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
executable(script_name, format, structure, [input_query...])
|
executable(script_name, format, structure, [input_query...] [,SETTINGS ...])
|
||||||
```
|
```
|
||||||
|
|
||||||
- `script_name`: the file name of the script, saved in the `user_scripts` folder (the default folder of the `user_scripts_path` setting)
|
- `script_name`: the file name of the script, saved in the `user_scripts` folder (the default folder of the `user_scripts_path` setting)
|
||||||
@ -83,6 +83,15 @@ The response looks like:
|
|||||||
└────┴────────────┘
|
└────┴────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Settings
|
||||||
|
|
||||||
|
- `send_chunk_header` - controls whether to send a row count before sending a chunk of data to process. Default value is `false`.
|
||||||
|
- `pool_size` — Size of the pool. If 0 is specified as `pool_size`, then there are no pool size restrictions. Default value is `16`.
|
||||||
|
- `max_command_execution_time` — Maximum executable script command execution time for processing a block of data. Specified in seconds. Default value is 10.
|
||||||
|
- `command_termination_timeout` — The executable script should contain a main read-write loop. After the table function is destroyed, the pipe is closed, and the executable file will have `command_termination_timeout` seconds to shut down before ClickHouse sends a SIGTERM signal to the child process. Specified in seconds. Default value is 10.
|
||||||
|
- `command_read_timeout` - timeout for reading data from the command's stdout, in milliseconds. Default value is 10000.
|
||||||
|
- `command_write_timeout` - timeout for writing data to the command's stdin, in milliseconds. Default value is 10000.
|
||||||
|
|
||||||
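Per the syntax above, these settings can be appended to the argument list. A sketch with a hypothetical `my_script.py` in `user_scripts`:

```sql
SELECT * FROM executable(
    'my_script.py', TabSeparated, 'id UInt64',
    SETTINGS pool_size = 4, command_read_timeout = 5000
);
```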
## Passing Query Results to a Script
|
## Passing Query Results to a Script
|
||||||
|
|
||||||
Be sure to check out the example in the `Executable` table engine on [how to pass query results to a script](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script). Here is how you execute the same script in that example using the `executable` table function:
|
Be sure to check out the example in the `Executable` table engine on [how to pass query results to a script](../../engines/table-engines/special/executable.md#passing-query-results-to-a-script). Here is how you execute the same script in that example using the `executable` table function:
|
||||||
@ -94,4 +103,4 @@ SELECT * FROM executable(
|
|||||||
'id UInt64, sentiment Float32',
|
'id UInt64, sentiment Float32',
|
||||||
(SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20)
|
(SELECT id, comment FROM hackernews WHERE id > 0 AND comment != '' LIMIT 20)
|
||||||
);
|
);
|
||||||
```
|
```
|
||||||
|
@ -133,4 +133,6 @@ CREATE TABLE pg_table_schema_with_dots (a UInt32)
|
|||||||
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-postgresql)
|
- [Using PostgreSQL as a dictionary source](../../sql-reference/dictionaries/index.md#dictionary-sources#dicts-external_dicts_dict_sources-postgresql)
|
||||||
|
|
||||||
## Related content
|
## Related content
|
||||||
|
|
||||||
- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)
|
- Blog: [ClickHouse and PostgreSQL - a match made in data heaven - part 1](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres)
|
||||||
|
- Blog: [ClickHouse and PostgreSQL - a Match Made in Data Heaven - part 2](https://clickhouse.com/blog/migrating-data-between-clickhouse-postgres-part-2)
|
||||||
|
@ -41,9 +41,15 @@ ClickHouse не работает и не собирается на 32-битны
|
|||||||
|
|
||||||
Run in the terminal:
|
Run in the terminal:
|
||||||
|
|
||||||
git clone git@github.com:your_github_username/ClickHouse.git --recursive
|
git clone --shallow-submodules git@github.com:your_github_username/ClickHouse.git
|
||||||
cd ClickHouse
|
cd ClickHouse
|
||||||
|
|
||||||
|
Or (if you want to use sparse checkout for submodules):
|
||||||
|
|
||||||
|
git clone git@github.com:your_github_username/ClickHouse.git
|
||||||
|
cd ClickHouse
|
||||||
|
./contrib/update-submodules.sh
|
||||||
|
|
||||||
Replace the word `your_github_username` in the git command with the name of your GitHub account.
|
Replace the word `your_github_username` in the git command with the name of your GitHub account.
|
||||||
|
|
||||||
This command creates a ClickHouse directory containing the working copy of the project.
|
This command creates a ClickHouse directory containing the working copy of the project.
|
||||||
|
@ -7,7 +7,7 @@ sidebar_position: 141
|
|||||||
|
|
||||||
Sums the difference between consecutive rows. If the difference is negative, it is ignored.
|
Sums the difference between consecutive rows. If the difference is negative, it is ignored.
|
||||||
|
|
||||||
This function is intended primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) ordered by some time bucket according to the timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view will have the same timestamp, it is impossible to merge them in the "right" order. The function keeps track of the `timestamp` of the observed values, so it is possible to order the states correctly during merging.
|
This function is intended primarily for [materialized views](../../../sql-reference/statements/create/view.md#materialized) that store data ordered by some rounded time interval according to a timestamp, for example, a `toStartOfMinute` bucket. Because the rows in such a materialized view all have the same timestamp, it is impossible to merge them in the correct order without storing the original, unrounded timestamp value. The `deltaSumTimestamp` function keeps track of the original `timestamp` of the observed values, so the values (states) of the function are computed correctly during the merging of parts.
|
||||||
|
|
||||||
To calculate the difference between ordered consecutive rows, you can use the [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) function instead of `deltaSumTimestamp`.
|
To calculate the difference between ordered consecutive rows, you can use the [deltaSum](../../../sql-reference/aggregate-functions/reference/deltasum.md#agg_functions-deltasum) function instead of `deltaSumTimestamp`.
|
||||||
|
|
||||||
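A minimal sketch with a hypothetical table `events(ts DateTime, value Float64)`:

```sql
-- per-minute buckets; the original ts keeps the states correctly ordered across merges
SELECT toStartOfMinute(ts) AS minute, deltaSumTimestamp(value, ts) AS delta
FROM events
GROUP BY minute;
```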
|
@ -168,3 +168,15 @@ SELECT format('{} {}', 'Hello', 'World')
|
|||||||
## trimBoth(s) {#trimboths}
|
## trimBoth(s) {#trimboths}
|
||||||
|
|
||||||
Returns a string with whitespace characters removed from both sides.
|
Returns a string with whitespace characters removed from both sides.
|
||||||
|
|
||||||
|
## soundex(s)
|
||||||
|
|
||||||
|
Returns the Soundex value of a string. The output type is FixedString. Example:
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT soundex('aksel');
|
||||||
|
|
||||||
|
┌─soundex('aksel')─┐
|
||||||
|
│ A240 │
|
||||||
|
└──────────────────┘
|
||||||
|
```
|
||||||
|
@ -277,11 +277,11 @@ void Client::initialize(Poco::Util::Application & self)
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
const char * env_user = getenv("CLICKHOUSE_USER"); // NOLINT(concurrency-mt-unsafe)
|
const char * env_user = getenv("CLICKHOUSE_USER"); // NOLINT(concurrency-mt-unsafe)
|
||||||
if (env_user)
|
if (env_user && !config().has("user"))
|
||||||
config().setString("user", env_user);
|
config().setString("user", env_user);
|
||||||
|
|
||||||
const char * env_password = getenv("CLICKHOUSE_PASSWORD"); // NOLINT(concurrency-mt-unsafe)
|
const char * env_password = getenv("CLICKHOUSE_PASSWORD"); // NOLINT(concurrency-mt-unsafe)
|
||||||
if (env_password)
|
if (env_password && !config().has("password"))
|
||||||
config().setString("password", env_password);
|
config().setString("password", env_password);
|
||||||
|
|
||||||
parseConnectionsCredentials();
|
parseConnectionsCredentials();
|
||||||
|
@ -375,15 +375,22 @@ int mainEntryClickHouseInstall(int argc, char ** argv)
|
|||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
ReadBufferFromFile in(binary_self_path.string());
|
String source = binary_self_path.string();
|
||||||
WriteBufferFromFile out(main_bin_tmp_path.string());
|
String destination = main_bin_tmp_path.string();
|
||||||
copyData(in, out);
|
|
||||||
out.sync();
|
|
||||||
|
|
||||||
if (0 != fchmod(out.getFD(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
|
/// Try to make a hard link first, as an optimization.
|
||||||
|
/// This is possible if the source and the destination are on the same filesystem.
|
||||||
|
if (0 != link(source.c_str(), destination.c_str()))
|
||||||
|
{
|
||||||
|
ReadBufferFromFile in(binary_self_path.string());
|
||||||
|
WriteBufferFromFile out(main_bin_tmp_path.string());
|
||||||
|
copyData(in, out);
|
||||||
|
out.sync();
|
||||||
|
out.finalize();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (0 != chmod(destination.c_str(), S_IRUSR | S_IRGRP | S_IROTH | S_IXUSR | S_IXGRP | S_IXOTH))
|
||||||
throwFromErrno(fmt::format("Cannot chmod {}", main_bin_tmp_path.string()), ErrorCodes::SYSTEM_ERROR);
|
throwFromErrno(fmt::format("Cannot chmod {}", main_bin_tmp_path.string()), ErrorCodes::SYSTEM_ERROR);
|
||||||
|
|
||||||
out.finalize();
|
|
||||||
}
|
}
|
||||||
catch (const Exception & e)
|
catch (const Exception & e)
|
||||||
{
|
{
|
||||||
|
@ -17,7 +17,6 @@
|
|||||||
#include <Poco/Net/TCPServerParams.h>
|
#include <Poco/Net/TCPServerParams.h>
|
||||||
#include <Poco/Net/TCPServer.h>
|
#include <Poco/Net/TCPServer.h>
|
||||||
#include <Poco/Util/HelpFormatter.h>
|
#include <Poco/Util/HelpFormatter.h>
|
||||||
#include <Poco/Version.h>
|
|
||||||
#include <Poco/Environment.h>
|
#include <Poco/Environment.h>
|
||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <pwd.h>
|
#include <pwd.h>
|
||||||
|
@ -135,6 +135,7 @@ namespace CurrentMetrics
|
|||||||
extern const Metric Revision;
|
extern const Metric Revision;
|
||||||
extern const Metric VersionInteger;
|
extern const Metric VersionInteger;
|
||||||
extern const Metric MemoryTracking;
|
extern const Metric MemoryTracking;
|
||||||
|
extern const Metric MergesMutationsMemoryTracking;
|
||||||
extern const Metric MaxDDLEntryID;
|
extern const Metric MaxDDLEntryID;
|
||||||
extern const Metric MaxPushedDDLEntryID;
|
extern const Metric MaxPushedDDLEntryID;
|
||||||
}
|
}
|
||||||
@ -981,7 +982,7 @@ try
|
|||||||
|
|
||||||
StatusFile status{path / "status", StatusFile::write_full_info};
|
StatusFile status{path / "status", StatusFile::write_full_info};
|
||||||
|
|
||||||
DB::ServerUUID::load(path / "uuid", log);
|
ServerUUID::load(path / "uuid", log);
|
||||||
|
|
||||||
/// Try to increase limit on number of open files.
|
/// Try to increase limit on number of open files.
|
||||||
{
|
{
|
||||||
@ -1225,6 +1226,25 @@ try
|
|||||||
total_memory_tracker.setDescription("(total)");
|
total_memory_tracker.setDescription("(total)");
|
||||||
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
|
total_memory_tracker.setMetric(CurrentMetrics::MemoryTracking);
|
||||||
|
|
||||||
|
size_t merges_mutations_memory_usage_soft_limit = server_settings.merges_mutations_memory_usage_soft_limit;
|
||||||
|
|
||||||
|
size_t default_merges_mutations_server_memory_usage = static_cast<size_t>(memory_amount * server_settings.merges_mutations_memory_usage_to_ram_ratio);
|
||||||
|
if (merges_mutations_memory_usage_soft_limit == 0 || merges_mutations_memory_usage_soft_limit > default_merges_mutations_server_memory_usage)
|
||||||
|
{
|
||||||
|
merges_mutations_memory_usage_soft_limit = default_merges_mutations_server_memory_usage;
|
||||||
|
LOG_WARNING(log, "Setting merges_mutations_memory_usage_soft_limit was set to {}"
|
||||||
|
" ({} available * {:.2f} merges_mutations_memory_usage_to_ram_ratio)",
|
||||||
|
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit),
|
||||||
|
formatReadableSizeWithBinarySuffix(memory_amount),
|
||||||
|
server_settings.merges_mutations_memory_usage_to_ram_ratio);
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(log, "Merges and mutations memory limit is set to {}",
|
||||||
|
formatReadableSizeWithBinarySuffix(merges_mutations_memory_usage_soft_limit));
|
||||||
|
background_memory_tracker.setSoftLimit(merges_mutations_memory_usage_soft_limit);
|
||||||
|
background_memory_tracker.setDescription("(background)");
|
||||||
|
background_memory_tracker.setMetric(CurrentMetrics::MergesMutationsMemoryTracking);
|
||||||
|
|
||||||
total_memory_tracker.setAllowUseJemallocMemory(server_settings.allow_use_jemalloc_memory);
|
total_memory_tracker.setAllowUseJemallocMemory(server_settings.allow_use_jemalloc_memory);
|
||||||
|
|
||||||
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
|
auto * global_overcommit_tracker = global_context->getGlobalOvercommitTracker();
|
||||||
|
@ -10,6 +10,7 @@
|
|||||||
#include <Interpreters/Access/InterpreterCreateUserQuery.h>
|
#include <Interpreters/Access/InterpreterCreateUserQuery.h>
|
||||||
#include <Interpreters/Access/InterpreterShowGrantsQuery.h>
|
#include <Interpreters/Access/InterpreterShowGrantsQuery.h>
|
||||||
#include <Common/logger_useful.h>
|
#include <Common/logger_useful.h>
|
||||||
|
#include <Common/ThreadPool.h>
|
||||||
#include <Poco/JSON/JSON.h>
|
#include <Poco/JSON/JSON.h>
|
||||||
#include <Poco/JSON/Object.h>
|
#include <Poco/JSON/Object.h>
|
||||||
#include <Poco/JSON/Stringifier.h>
|
#include <Poco/JSON/Stringifier.h>
|
||||||
@ -19,6 +20,7 @@
|
|||||||
#include <base/range.h>
|
#include <base/range.h>
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -317,15 +319,15 @@ void DiskAccessStorage::scheduleWriteLists(AccessEntityType type)
|
|||||||
return; /// If the lists' writing thread is still waiting we can update `types_of_lists_to_write` easily,
|
return; /// If the lists' writing thread is still waiting we can update `types_of_lists_to_write` easily,
|
||||||
/// without restarting that thread.
|
/// without restarting that thread.
|
||||||
|
|
||||||
if (lists_writing_thread.joinable())
|
if (lists_writing_thread && lists_writing_thread->joinable())
|
||||||
lists_writing_thread.join();
|
lists_writing_thread->join();
|
||||||
|
|
||||||
/// Create the 'need_rebuild_lists.mark' file.
|
/// Create the 'need_rebuild_lists.mark' file.
|
||||||
/// This file will be used later to find out if writing lists is successful or not.
|
/// This file will be used later to find out if writing lists is successful or not.
|
||||||
std::ofstream out{getNeedRebuildListsMarkFilePath(directory_path)};
|
std::ofstream out{getNeedRebuildListsMarkFilePath(directory_path)};
|
||||||
out.close();
|
out.close();
|
||||||
|
|
||||||
lists_writing_thread = ThreadFromGlobalPool{&DiskAccessStorage::listsWritingThreadFunc, this};
|
lists_writing_thread = std::make_unique<ThreadFromGlobalPool>(&DiskAccessStorage::listsWritingThreadFunc, this);
|
||||||
lists_writing_thread_is_waiting = true;
|
lists_writing_thread_is_waiting = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -349,10 +351,10 @@ void DiskAccessStorage::listsWritingThreadFunc()
|
|||||||
|
|
||||||
void DiskAccessStorage::stopListsWritingThread()
|
void DiskAccessStorage::stopListsWritingThread()
|
||||||
{
|
{
|
||||||
if (lists_writing_thread.joinable())
|
if (lists_writing_thread && lists_writing_thread->joinable())
|
||||||
{
|
{
|
||||||
lists_writing_thread_should_exit.notify_one();
|
lists_writing_thread_should_exit.notify_one();
|
||||||
lists_writing_thread.join();
|
lists_writing_thread->join();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Access/MemoryAccessStorage.h>
|
#include <Access/MemoryAccessStorage.h>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool_fwd.h>
|
||||||
#include <boost/container/flat_set.hpp>
|
#include <boost/container/flat_set.hpp>
|
||||||
|
|
||||||
|
|
||||||
@ -81,7 +81,7 @@ private:
|
|||||||
bool failed_to_write_lists TSA_GUARDED_BY(mutex) = false;
|
bool failed_to_write_lists TSA_GUARDED_BY(mutex) = false;
|
||||||
|
|
||||||
/// List files are written in a separate thread.
|
/// List files are written in a separate thread.
|
||||||
ThreadFromGlobalPool lists_writing_thread;
|
std::unique_ptr<ThreadFromGlobalPool> lists_writing_thread;
|
||||||
|
|
||||||
/// Signals `lists_writing_thread` to exit.
|
/// Signals `lists_writing_thread` to exit.
|
||||||
std::condition_variable lists_writing_thread_should_exit;
|
std::condition_variable lists_writing_thread_should_exit;
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
#include <memory>
|
||||||
#include <Access/AccessEntityIO.h>
|
#include <Access/AccessEntityIO.h>
|
||||||
#include <Access/MemoryAccessStorage.h>
|
#include <Access/MemoryAccessStorage.h>
|
||||||
#include <Access/ReplicatedAccessStorage.h>
|
#include <Access/ReplicatedAccessStorage.h>
|
||||||
@ -15,6 +16,7 @@
|
|||||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||||
#include <Common/escapeForFileName.h>
|
#include <Common/escapeForFileName.h>
|
||||||
#include <Common/setThreadName.h>
|
#include <Common/setThreadName.h>
|
||||||
|
#include <Common/ThreadPool.h>
|
||||||
#include <base/range.h>
|
#include <base/range.h>
|
||||||
#include <base/sleep.h>
|
#include <base/sleep.h>
|
||||||
#include <boost/range/algorithm_ext/erase.hpp>
|
#include <boost/range/algorithm_ext/erase.hpp>
|
||||||
@ -72,7 +74,7 @@ void ReplicatedAccessStorage::startWatchingThread()
|
|||||||
{
|
{
|
||||||
bool prev_watching_flag = watching.exchange(true);
|
bool prev_watching_flag = watching.exchange(true);
|
||||||
if (!prev_watching_flag)
|
if (!prev_watching_flag)
|
||||||
watching_thread = ThreadFromGlobalPool(&ReplicatedAccessStorage::runWatchingThread, this);
|
watching_thread = std::make_unique<ThreadFromGlobalPool>(&ReplicatedAccessStorage::runWatchingThread, this);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ReplicatedAccessStorage::stopWatchingThread()
|
void ReplicatedAccessStorage::stopWatchingThread()
|
||||||
@ -81,8 +83,8 @@ void ReplicatedAccessStorage::stopWatchingThread()
|
|||||||
if (prev_watching_flag)
|
if (prev_watching_flag)
|
||||||
{
|
{
|
||||||
watched_queue->finish();
|
watched_queue->finish();
|
||||||
if (watching_thread.joinable())
|
if (watching_thread && watching_thread->joinable())
|
||||||
watching_thread.join();
|
watching_thread->join();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
#include <atomic>
|
#include <atomic>
|
||||||
|
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool_fwd.h>
|
||||||
#include <Common/ZooKeeper/Common.h>
|
#include <Common/ZooKeeper/Common.h>
|
||||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||||
#include <Common/ConcurrentBoundedQueue.h>
|
#include <Common/ConcurrentBoundedQueue.h>
|
||||||
@ -21,7 +21,7 @@ public:
|
|||||||
static constexpr char STORAGE_TYPE[] = "replicated";
|
static constexpr char STORAGE_TYPE[] = "replicated";
|
||||||
|
|
||||||
ReplicatedAccessStorage(const String & storage_name, const String & zookeeper_path, zkutil::GetZooKeeper get_zookeeper, AccessChangesNotifier & changes_notifier_, bool allow_backup);
|
ReplicatedAccessStorage(const String & storage_name, const String & zookeeper_path, zkutil::GetZooKeeper get_zookeeper, AccessChangesNotifier & changes_notifier_, bool allow_backup);
|
||||||
virtual ~ReplicatedAccessStorage() override;
|
~ReplicatedAccessStorage() override;
|
||||||
|
|
||||||
const char * getStorageType() const override { return STORAGE_TYPE; }
|
const char * getStorageType() const override { return STORAGE_TYPE; }
|
||||||
|
|
||||||
@ -43,7 +43,7 @@ private:
|
|||||||
std::mutex cached_zookeeper_mutex;
|
std::mutex cached_zookeeper_mutex;
|
||||||
|
|
||||||
std::atomic<bool> watching = false;
|
std::atomic<bool> watching = false;
|
||||||
ThreadFromGlobalPool watching_thread;
|
std::unique_ptr<ThreadFromGlobalPool> watching_thread;
|
||||||
std::shared_ptr<ConcurrentBoundedQueue<UUID>> watched_queue;
|
std::shared_ptr<ConcurrentBoundedQueue<UUID>> watched_queue;
|
||||||
|
|
||||||
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
|
std::optional<UUID> insertImpl(const AccessEntityPtr & entity, bool replace_if_exists, bool throw_if_exists) override;
|
||||||
|
@ -1,6 +1,5 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Common/logger_useful.h>
|
|
||||||
#include <base/sort.h>
|
#include <base/sort.h>
|
||||||
|
|
||||||
#include <DataTypes/DataTypesNumber.h>
|
#include <DataTypes/DataTypesNumber.h>
|
||||||
|
@ -11,7 +11,6 @@
|
|||||||
#include <IO/WriteHelpers.h>
|
#include <IO/WriteHelpers.h>
|
||||||
#include <Columns/ColumnString.h>
|
#include <Columns/ColumnString.h>
|
||||||
#include <Common/PODArray.h>
|
#include <Common/PODArray.h>
|
||||||
#include <Common/logger_useful.h>
|
|
||||||
#include <IO/ReadBufferFromString.h>
|
#include <IO/ReadBufferFromString.h>
|
||||||
#include <Common/HashTable/HashMap.h>
|
#include <Common/HashTable/HashMap.h>
|
||||||
#include <Columns/IColumn.h>
|
#include <Columns/IColumn.h>
|
||||||
|
@ -18,7 +18,6 @@
|
|||||||
#include <AggregateFunctions/IAggregateFunction.h>
|
#include <AggregateFunctions/IAggregateFunction.h>
|
||||||
#include <AggregateFunctions/FactoryHelpers.h>
|
#include <AggregateFunctions/FactoryHelpers.h>
|
||||||
#include <map>
|
#include <map>
|
||||||
#include <Common/logger_useful.h>
|
|
||||||
#include <Common/ClickHouseRevision.h>
|
#include <Common/ClickHouseRevision.h>
|
||||||
|
|
||||||
|
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
#include <Interpreters/Context_fwd.h>
|
#include <Interpreters/Context_fwd.h>
|
||||||
#include <base/types.h>
|
#include <base/types.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool_fwd.h>
|
||||||
#include <Core/IResolvedFunction.h>
|
#include <Core/IResolvedFunction.h>
|
||||||
|
|
||||||
#include "config.h"
|
#include "config.h"
|
||||||
|
@ -32,6 +32,7 @@
|
|||||||
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
#include <AggregateFunctions/AggregateFunctionFactory.h>
|
||||||
|
|
||||||
#include <TableFunctions/TableFunctionFactory.h>
|
#include <TableFunctions/TableFunctionFactory.h>
|
||||||
|
#include <Formats/FormatFactory.h>
|
||||||
|
|
||||||
 #include <Databases/IDatabase.h>
 
@@ -75,6 +76,7 @@
 #include <Analyzer/InDepthQueryTreeVisitor.h>
 #include <Analyzer/QueryTreeBuilder.h>
 #include <Analyzer/IQueryTreeNode.h>
+#include <Analyzer/Identifier.h>
 
 namespace ProfileEvents
 {
@@ -112,6 +114,8 @@ namespace ErrorCodes
     extern const int ALIAS_REQUIRED;
     extern const int NUMBER_OF_ARGUMENTS_DOESNT_MATCH;
     extern const int UNKNOWN_TABLE;
+    extern const int ILLEGAL_COLUMN;
+    extern const int NUMBER_OF_COLUMNS_DOESNT_MATCH;
 }
 
 /** Query analyzer implementation overview. Please check documentation in QueryAnalysisPass.h before.
@@ -6079,6 +6083,18 @@ void QueryAnalyzer::initializeTableExpressionData(const QueryTreeNodePtr & table
     scope.table_expression_node_to_data.emplace(table_expression_node, std::move(table_expression_data));
 }
 
+bool findIdentifier(const FunctionNode & function)
+{
+    for (const auto & argument : function.getArguments())
+    {
+        if (argument->as<IdentifierNode>())
+            return true;
+        if (const auto * f = argument->as<FunctionNode>(); f && findIdentifier(*f))
+            return true;
+    }
+    return false;
+}
+
 /// Resolve table function node in scope
 void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
     IdentifierResolveScope & scope,
@@ -6090,12 +6106,11 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
     if (!nested_table_function)
         expressions_visitor.visit(table_function_node_typed.getArgumentsNode());
 
-    const auto & table_function_factory = TableFunctionFactory::instance();
     const auto & table_function_name = table_function_node_typed.getTableFunctionName();
 
     auto & scope_context = scope.context;
 
-    TableFunctionPtr table_function_ptr = table_function_factory.tryGet(table_function_name, scope_context);
+    TableFunctionPtr table_function_ptr = TableFunctionFactory::instance().tryGet(table_function_name, scope_context);
     if (!table_function_ptr)
     {
         auto hints = TableFunctionFactory::instance().getHints(table_function_name);
@@ -6110,17 +6125,131 @@ void QueryAnalyzer::resolveTableFunction(QueryTreeNodePtr & table_function_node,
             table_function_name);
     }
 
+    uint64_t use_structure_from_insertion_table_in_table_functions = scope_context->getSettingsRef().use_structure_from_insertion_table_in_table_functions;
     if (!nested_table_function &&
-        scope_context->getSettingsRef().use_structure_from_insertion_table_in_table_functions &&
+        use_structure_from_insertion_table_in_table_functions &&
         scope_context->hasInsertionTable() &&
         table_function_ptr->needStructureHint())
     {
         const auto & insertion_table = scope_context->getInsertionTable();
         if (!insertion_table.empty())
         {
-            auto insertion_table_storage = DatabaseCatalog::instance().getTable(insertion_table, scope_context);
-            const auto & structure_hint = insertion_table_storage->getInMemoryMetadataPtr()->columns;
-            table_function_ptr->setStructureHint(structure_hint);
+            const auto & insert_structure = DatabaseCatalog::instance().getTable(insertion_table, scope_context)->getInMemoryMetadataPtr()->getColumns();
+            DB::ColumnsDescription structure_hint;
+
+            bool use_columns_from_insert_query = true;
+
+            /// Insert table matches columns against SELECT expression by position, so we want to map
+            /// insert table columns to table function columns through names from SELECT expression.
+
+            auto insert_column = insert_structure.begin();
+            auto insert_structure_end = insert_structure.end(); /// end iterator of the range covered by possible asterisk
+            auto virtual_column_names = table_function_ptr->getVirtualsToCheckBeforeUsingStructureHint();
+            bool asterisk = false;
+            const auto & expression_list = scope.scope_node->as<QueryNode &>().getProjection();
+            auto expression = expression_list.begin();
+
+            /// We want to go through SELECT expression list and correspond each expression to column in insert table
+            /// which type will be used as a hint for the file structure inference.
+            for (; expression != expression_list.end() && insert_column != insert_structure_end; ++expression)
+            {
+                if (auto * identifier_node = (*expression)->as<IdentifierNode>())
+                {
+
+                    if (!virtual_column_names.contains(identifier_node->getIdentifier().getFullName()))
+                    {
+                        if (asterisk)
+                        {
+                            if (use_structure_from_insertion_table_in_table_functions == 1)
+                                throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Asterisk cannot be mixed with column list in INSERT SELECT query.");
+
+                            use_columns_from_insert_query = false;
+                            break;
+                        }
+
+                        structure_hint.add({ identifier_node->getIdentifier().getFullName(), insert_column->type });
+                    }
+
+                    /// Once we hit asterisk we want to find end of the range covered by asterisk
+                    /// contributing every further SELECT expression to the tail of insert structure
+                    if (asterisk)
+                        --insert_structure_end;
+                    else
+                        ++insert_column;
+                }
+                else if (auto * matcher_node = (*expression)->as<MatcherNode>(); matcher_node && matcher_node->getMatcherType() == MatcherNodeType::ASTERISK)
+                {
+                    if (asterisk)
+                    {
+                        if (use_structure_from_insertion_table_in_table_functions == 1)
+                            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Only one asterisk can be used in INSERT SELECT query.");
+
+                        use_columns_from_insert_query = false;
+                        break;
+                    }
+                    if (!structure_hint.empty())
+                    {
+                        if (use_structure_from_insertion_table_in_table_functions == 1)
+                            throw Exception(ErrorCodes::ILLEGAL_COLUMN, "Asterisk cannot be mixed with column list in INSERT SELECT query.");
+
+                        use_columns_from_insert_query = false;
+                        break;
+                    }
+
+                    asterisk = true;
+                }
+                else if (auto * function = (*expression)->as<FunctionNode>())
+                {
+                    if (use_structure_from_insertion_table_in_table_functions == 2 && findIdentifier(*function))
+                    {
+                        use_columns_from_insert_query = false;
+                        break;
+                    }
+
+                    /// Once we hit asterisk we want to find end of the range covered by asterisk
+                    /// contributing every further SELECT expression to the tail of insert structure
+                    if (asterisk)
+                        --insert_structure_end;
+                    else
+                        ++insert_column;
+                }
+                else
+                {
+                    /// Once we hit asterisk we want to find end of the range covered by asterisk
+                    /// contributing every further SELECT expression to the tail of insert structure
+                    if (asterisk)
+                        --insert_structure_end;
+                    else
+                        ++insert_column;
+                }
+            }
+
+            if (use_structure_from_insertion_table_in_table_functions == 2 && !asterisk)
+            {
+                /// For input function we should check if input format supports reading subset of columns.
+                if (table_function_ptr->getName() == "input")
+                    use_columns_from_insert_query = FormatFactory::instance().checkIfFormatSupportsSubsetOfColumns(scope.context->getInsertFormat());
+                else
+                    use_columns_from_insert_query = table_function_ptr->supportsReadingSubsetOfColumns();
+            }
+
+            if (use_columns_from_insert_query)
+            {
+                if (expression == expression_list.end())
+                {
+                    /// Append tail of insert structure to the hint
+                    if (asterisk)
+                    {
+                        for (; insert_column != insert_structure_end; ++insert_column)
+                            structure_hint.add({ insert_column->name, insert_column->type });
+                    }
+
+                    if (!structure_hint.empty())
+                        table_function_ptr->setStructureHint(structure_hint);
+
+                } else if (use_structure_from_insertion_table_in_table_functions == 1)
+                    throw Exception(ErrorCodes::NUMBER_OF_COLUMNS_DOESNT_MATCH, "Number of columns in insert table less than required by SELECT expression.");
+            }
         }
     }
 
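The mapping above is the subtle part of this hunk: INSERT target columns are matched to SELECT expressions purely by position, and a single `*` is allowed to cover whatever contiguous range the other expressions do not claim. The following standalone C++ sketch shows that correspondence on plain std types instead of the analyzer's query-tree nodes; it collapses the three values of use_structure_from_insertion_table_in_table_functions into a single "give up" path and lets any item after '*' shrink its range (the real code gives up for plain columns there and shrinks only for virtual columns, functions, and other expressions), so it illustrates the idea rather than reproducing the analyzer.

#include <iostream>
#include <string>
#include <vector>

struct Column { std::string name; std::string type; };
struct SelectItem { std::string name; bool is_asterisk = false; };

// Returns the structure hint, or an empty vector where the real analyzer
// would fall back to schema inference (or throw, depending on the setting).
std::vector<Column> buildStructureHint(
    const std::vector<Column> & insert_structure, const std::vector<SelectItem> & select_list)
{
    std::vector<Column> hint;
    auto insert_column = insert_structure.begin();
    auto insert_end = insert_structure.end(); // right edge of the range '*' may cover
    bool asterisk = false;

    for (const auto & item : select_list)
    {
        if (insert_column == insert_end)
            break;
        if (item.is_asterisk)
        {
            // Only one '*' is usable, and '*' after explicit columns is ambiguous.
            if (asterisk || !hint.empty())
                return {};
            asterisk = true;
        }
        else if (asterisk)
            --insert_end;   // items after '*' shrink its range from the right
        else
            hint.push_back({item.name, (insert_column++)->type}); // positional match
    }

    // Whatever remains between the two cursors is what '*' expands to.
    if (asterisk)
        for (; insert_column != insert_end; ++insert_column)
            hint.push_back(*insert_column);
    return hint;
}

int main()
{
    const std::vector<Column> insert_structure{{"id", "UInt64"}, {"name", "String"}, {"value", "Float64"}};

    // SELECT c1, c2, c3: names come from the SELECT list, types by position.
    for (const auto & c : buildStructureHint(insert_structure, {{"c1"}, {"c2"}, {"c3"}}))
        std::cout << c.name << ' ' << c.type << '\n'; // c1 UInt64, c2 String, c3 Float64

    // SELECT *, some_expr: '*' covers every insert column except the last one.
    for (const auto & c : buildStructureHint(insert_structure, {{"", true}, {"some_expr"}}))
        std::cout << c.name << ' ' << c.type << '\n'; // id UInt64, name String
}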
@@ -1,5 +1,6 @@
 #include <Backups/BackupCoordinationFileInfos.h>
 #include <Common/quoteString.h>
+#include <Common/Exception.h>
 
 
 namespace DB
@@ -771,16 +771,19 @@ bool BackupCoordinationRemote::hasConcurrentBackups(const std::atomic<size_t> &)
             String existing_backup_uuid = existing_backup_path;
             existing_backup_uuid.erase(0, String("backup-").size());
 
 
             if (existing_backup_uuid == toString(backup_uuid))
                 continue;
 
-            const auto status = zk->get(root_zookeeper_path + "/" + existing_backup_path + "/stage");
-            if (status != Stage::COMPLETED)
+            String status;
+            if (zk->tryGet(root_zookeeper_path + "/" + existing_backup_path + "/stage", status))
             {
-                LOG_WARNING(log, "Found a concurrent backup: {}, current backup: {}", existing_backup_uuid, toString(backup_uuid));
-                result = true;
-                return;
+                /// If status is not COMPLETED it could be because the backup failed, check if 'error' exists
+                if (status != Stage::COMPLETED && !zk->exists(root_zookeeper_path + "/" + existing_backup_path + "/error"))
+                {
+                    LOG_WARNING(log, "Found a concurrent backup: {}, current backup: {}", existing_backup_uuid, toString(backup_uuid));
+                    result = true;
+                    return;
+                }
             }
         }
 
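The behavioural change in this hunk: a backup whose "stage" node has already vanished, or which left an "error" node behind, is no longer reported as concurrent. A toy model of the new check, with a std::map standing in for ZooKeeper (the real code goes through the zkutil ZooKeeper client; node names match the diff, everything else is illustrative):

#include <iostream>
#include <map>
#include <string>

using FakeZooKeeper = std::map<std::string, std::string>;

bool tryGet(const FakeZooKeeper & zk, const std::string & path, std::string & value)
{
    auto it = zk.find(path);
    if (it == zk.end())
        return false; // node vanished between listing and reading: no longer an error
    value = it->second;
    return true;
}

bool isConcurrent(const FakeZooKeeper & zk, const std::string & backup_root)
{
    std::string status;
    if (tryGet(zk, backup_root + "/stage", status))
    {
        /// If status is not COMPLETED it could be because the backup failed: check 'error'.
        if (status != "completed" && !zk.count(backup_root + "/error"))
            return true;
    }
    return false;
}

int main()
{
    FakeZooKeeper zk{{"/backups/backup-1/stage", "creating"},
                     {"/backups/backup-2/stage", "creating"},
                     {"/backups/backup-2/error", "Disk full"}};
    std::cout << isConcurrent(zk, "/backups/backup-1") << '\n'; // 1: really running
    std::cout << isConcurrent(zk, "/backups/backup-2") << '\n'; // 0: failed, not concurrent
    std::cout << isConcurrent(zk, "/backups/backup-3") << '\n'; // 0: already removed
}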
@@ -15,6 +15,7 @@
 #include <base/sleep.h>
 #include <Common/escapeForFileName.h>
 #include <boost/range/algorithm/copy.hpp>
+#include <base/scope_guard.h>
 #include <filesystem>
 
 namespace fs = std::filesystem;
@@ -490,7 +491,7 @@ std::vector<std::pair<ASTPtr, StoragePtr>> BackupEntriesCollector::findTablesInD
 {
     /// Database or table could be replicated - so may use ZooKeeper. We need to retry.
     auto zookeeper_retries_info = global_zookeeper_retries_info;
-    ZooKeeperRetriesControl retries_ctl("getTablesForBackup", zookeeper_retries_info);
+    ZooKeeperRetriesControl retries_ctl("getTablesForBackup", zookeeper_retries_info, nullptr);
     retries_ctl.retryLoop([&](){ db_tables = database->getTablesForBackup(filter_by_table_name, context); });
 }
 catch (Exception & e)
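The extra nullptr argument matches an updated ZooKeeperRetriesControl constructor elsewhere in this merge; the retry pattern the comment describes is worth seeing in isolation. A minimal sketch of such a retry loop (the names and the linear-backoff policy are illustrative assumptions, not ClickHouse's implementation):

#include <chrono>
#include <functional>
#include <stdexcept>
#include <thread>

struct RetriesInfo { size_t max_retries = 5; std::chrono::milliseconds backoff{100}; };

// Runs the operation until it succeeds or the retry budget is exhausted.
void retryLoop(const RetriesInfo & info, const std::function<void()> & operation)
{
    for (size_t attempt = 0;; ++attempt)
    {
        try
        {
            operation();
            return; // success
        }
        catch (const std::exception &)
        {
            if (attempt + 1 >= info.max_retries)
                throw; // out of budget: propagate the last error
            std::this_thread::sleep_for(info.backoff * (attempt + 1)); // linear backoff
        }
    }
}

int main()
{
    int calls = 0;
    // Fails twice with a transient error, then succeeds on the third attempt.
    retryLoop({}, [&] { if (++calls < 3) throw std::runtime_error("transient ZooKeeper error"); });
}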
@@ -8,10 +8,11 @@ namespace DB
 BackupEntryFromAppendOnlyFile::BackupEntryFromAppendOnlyFile(
     const DiskPtr & disk_,
     const String & file_path_,
+    const ReadSettings & settings_,
     const std::optional<UInt64> & file_size_,
     const std::optional<UInt128> & checksum_,
     const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_)
-    : BackupEntryFromImmutableFile(disk_, file_path_, file_size_, checksum_, temporary_file_)
+    : BackupEntryFromImmutableFile(disk_, file_path_, settings_, file_size_, checksum_, temporary_file_)
     , limit(BackupEntryFromImmutableFile::getSize())
 {
 }
@@ -16,6 +16,7 @@ public:
     BackupEntryFromAppendOnlyFile(
         const DiskPtr & disk_,
         const String & file_path_,
+        const ReadSettings & settings_,
        const std::optional<UInt64> & file_size_ = {},
        const std::optional<UInt128> & checksum_ = {},
        const std::shared_ptr<TemporaryFileOnDisk> & temporary_file_ = {});
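Both the definition and the declaration gain a ReadSettings parameter in the third position, which is forwarded to BackupEntryFromImmutableFile, so every caller now has to thread read settings through. A hypothetical call-site fragment (the variable names and the path are illustrative, not taken from this diff):

// Hypothetical caller after this change; the real call sites live in the
// backup machinery and obtain ReadSettings from the current Context.
auto entry = std::make_unique<BackupEntryFromAppendOnlyFile>(
    disk,                       // DiskPtr of the disk holding the file (assumed in scope)
    "store/123/data.bin",       // illustrative file path
    context->getReadSettings(), // the new settings_ parameter
    /* file_size_ = */ std::nullopt,
    /* checksum_ = */ std::nullopt);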
Some files were not shown because too many files have changed in this diff.