Merge branch 'master' into refactoring-ip-types

Yakov Olkhovskiy 2023-01-04 11:11:06 -05:00 committed by GitHub
commit 7a5a36cbed
219 changed files with 3882 additions and 1745 deletions


@@ -141,37 +141,6 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
SharedBuildSmokeTest:
needs: [BuilderDebShared]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/split_build_check
REPO_COPY=${{runner.temp}}/split_build_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Shared build check
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 split_build_smoke_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
@@ -508,47 +477,6 @@ jobs:
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderDebShared:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_shared
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinClangTidy:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
@@ -968,7 +896,6 @@ jobs:
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
- BuilderDebShared
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:
@@ -3139,7 +3066,6 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- SharedBuildSmokeTest
- SQLancerTestRelease
- SQLancerTestDebug
runs-on: [self-hosted, style-checker]


@@ -203,37 +203,6 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
SharedBuildSmokeTest:
needs: [BuilderDebShared]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/split_build_check
REPO_COPY=${{runner.temp}}/split_build_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Shared build check
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 split_build_smoke_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################
@@ -570,47 +539,6 @@ jobs:
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderDebShared:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_shared
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinClangTidy:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
@@ -1018,12 +946,10 @@ jobs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
- BuilderBinFreeBSD
# - BuilderBinGCC
- BuilderBinPPC64
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
- BuilderDebShared
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:
@@ -2603,7 +2529,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
TestsBugfixCheck:
needs: [CheckLabels, StyleCheck]
-runs-on: [self-hosted, stress-tester]
+runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
@@ -2639,7 +2565,7 @@ jobs:
python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \
--validate-bugfix --post-commit-status=file || echo 'ignore exit code'
-python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv"
+python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv"
- name: Cleanup
if: always()
run: |
@@ -4448,7 +4374,6 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- SharedBuildSmokeTest
- CompatibilityCheck
- IntegrationTestsFlakyCheck
- SQLancerTestRelease

.gitmodules vendored

@@ -104,13 +104,13 @@
url = https://github.com/ClickHouse/aws-sdk-cpp.git
[submodule "aws-c-event-stream"]
path = contrib/aws-c-event-stream
-url = https://github.com/ClickHouse/aws-c-event-stream.git
+url = https://github.com/awslabs/aws-c-event-stream.git
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/ClickHouse/aws-c-common.git
[submodule "aws-checksums"]
path = contrib/aws-checksums
-url = https://github.com/ClickHouse/aws-checksums.git
+url = https://github.com/awslabs/aws-checksums.git
[submodule "contrib/curl"]
path = contrib/curl
url = https://github.com/curl/curl.git
@@ -294,3 +294,33 @@
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide.git
[submodule "contrib/aws-crt-cpp"]
path = contrib/aws-crt-cpp
url = https://github.com/ClickHouse/aws-crt-cpp.git
[submodule "contrib/aws-c-io"]
path = contrib/aws-c-io
url = https://github.com/ClickHouse/aws-c-io.git
[submodule "contrib/aws-c-mqtt"]
path = contrib/aws-c-mqtt
url = https://github.com/awslabs/aws-c-mqtt.git
[submodule "contrib/aws-c-auth"]
path = contrib/aws-c-auth
url = https://github.com/awslabs/aws-c-auth.git
[submodule "contrib/aws-c-cal"]
path = contrib/aws-c-cal
url = https://github.com/ClickHouse/aws-c-cal.git
[submodule "contrib/aws-c-sdkutils"]
path = contrib/aws-c-sdkutils
url = https://github.com/awslabs/aws-c-sdkutils.git
[submodule "contrib/aws-c-http"]
path = contrib/aws-c-http
url = https://github.com/awslabs/aws-c-http.git
[submodule "contrib/aws-c-s3"]
path = contrib/aws-c-s3
url = https://github.com/awslabs/aws-c-s3.git
[submodule "contrib/aws-c-compression"]
path = contrib/aws-c-compression
url = https://github.com/awslabs/aws-c-compression.git
[submodule "contrib/aws-s2n-tls"]
path = contrib/aws-s2n-tls
url = https://github.com/aws/s2n-tls.git


@@ -73,22 +73,7 @@ message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)
-option(USE_STATIC_LIBRARIES "Disable to use shared libraries" ON)
-# DEVELOPER ONLY.
-# Faster linking if turned on.
-option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files" OFF)
-if (USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
-message(FATAL_ERROR "SPLIT_SHARED_LIBRARIES=1 must not be used together with USE_STATIC_LIBRARIES=1")
-endif()
-if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
-set(BUILD_SHARED_LIBS 1 CACHE INTERNAL "")
-endif ()
-if (USE_STATIC_LIBRARIES)
-list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
-endif ()
+list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)
@@ -171,7 +156,7 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests"
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)
-if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND USE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND NOT USE_MUSL)
+if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT USE_MUSL)
# Only for Linux, x86_64 or aarch64.
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)
@@ -467,22 +452,13 @@ endif ()
set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")
-if (USE_STATIC_LIBRARIES)
-set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
-if (OS_LINUX AND NOT ARCH_AARCH64)
-# Slightly more efficient code can be generated
-# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
-set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
-set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
-set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
-endif ()
-else ()
-set (CMAKE_POSITION_INDEPENDENT_CODE ON)
-# This is required for clang on Arch linux, that uses PIE by default.
-# See enable-SSP-and-PIE-by-default.patch [1].
-#
-# [1]: https://github.com/archlinux/svntogit-packages/blob/6e681aa860e65ad46a1387081482eb875c2200f2/trunk/enable-SSP-and-PIE-by-default.patch
-set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie")
-endif ()
+set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
+if (OS_LINUX AND NOT ARCH_AARCH64)
+# Slightly more efficient code can be generated
+# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
+set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
+set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
+set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
+endif ()
if (ENABLE_TESTS)
@@ -504,10 +480,7 @@ else ()
set (CLICKHOUSE_ETC_DIR "${CMAKE_INSTALL_PREFIX}/etc")
endif ()
-message (STATUS
-"Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ;
-USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES}
-SPLIT_SHARED_LIBRARIES=${SPLIT_SHARED_LIBRARIES}")
+message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}")
include (GNUInstallDirs)
@@ -553,7 +526,7 @@ macro (clickhouse_add_executable target)
# - _je_zone_register due to JEMALLOC_PRIVATE_NAMESPACE=je_ under OS X.
# - but jemalloc-cmake does not run private_namespace.sh
# so symbol name should be _zone_register
-if (ENABLE_JEMALLOC AND USE_STATIC_LIBRARIES AND OS_DARWIN)
+if (ENABLE_JEMALLOC AND OS_DARWIN)
set_property(TARGET ${target} APPEND PROPERTY LINK_OPTIONS -u_zone_register)
endif()
endif()


@@ -39,10 +39,6 @@ endif ()
target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")
if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
endif()
target_link_libraries (common
PUBLIC
ch_contrib::cityhash


@@ -37,7 +37,7 @@ if (GLIBC_COMPATIBILITY)
target_include_directories(glibc-compatibility PRIVATE libcxxabi ${musl_arch_include_dir})
-if (( NOT USE_STATIC_LIBRARIES AND NOT USE_STATIC_LIBRARIES ) OR ENABLE_OPENSSL_DYNAMIC)
+if (ENABLE_OPENSSL_DYNAMIC)
target_compile_options(glibc-compatibility PRIVATE -fPIC)
endif ()


@@ -102,6 +102,11 @@ elseif (ARCH_AMD64)
SET(ENABLE_AVX512_FOR_SPEC_OP 0)
endif()
# ClickHouse can be cross-compiled (e.g. on an ARM host for x86) but it is also possible to build ClickHouse on x86 w/o AVX for x86 w/
# AVX. We only check that the compiler can emit certain SIMD instructions, we don't care if the host system is able to run the binary.
# Therefore, use check_cxx_source_compiles (= does the code compile+link?) instead of check_cxx_source_runs (= does the code
# compile+link+run).
set (TEST_FLAG "-mssse3")
set (CMAKE_REQUIRED_FLAGS "${TEST_FLAG} -O0")
check_cxx_source_compiles("


@@ -25,7 +25,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${ASAN_FLAGS}")
endif()
-if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan")
endif ()
if (COMPILER_GCC)
@@ -50,7 +50,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory")
endif()
-if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan")
endif ()
@@ -71,7 +71,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread")
endif()
-if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan")
endif ()
if (COMPILER_GCC)
@@ -103,7 +103,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
endif()
-if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
endif ()
if (COMPILER_GCC)


@@ -115,12 +115,25 @@ endif()
add_contrib (llvm-project-cmake llvm-project)
add_contrib (libfuzzer-cmake llvm-project)
add_contrib (libxml2-cmake libxml2)
-add_contrib (aws-s3-cmake
+add_contrib (aws-cmake
aws
+aws-c-auth
+aws-c-cal
aws-c-common
+aws-c-compression
aws-c-event-stream
+aws-c-http
+aws-c-io
+aws-c-mqtt
+aws-c-s3
+aws-c-sdkutils
+aws-s2n-tls
aws-checksums
+aws-crt-cpp
+aws-cmake
)
add_contrib (base64-cmake base64)
add_contrib (simdjson-cmake simdjson)
add_contrib (rapidjson-cmake rapidjson)


@@ -78,23 +78,14 @@ set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers")
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include")
# set flatbuffers CMake options
-if (USE_STATIC_LIBRARIES)
-set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
-set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
-else ()
-set(FLATBUFFERS_BUILD_SHAREDLIB ON CACHE BOOL "Enable the build of the flatbuffers shared library")
-set(FLATBUFFERS_BUILD_FLATLIB OFF CACHE BOOL "Disable the build of the flatbuffers library")
-endif ()
+set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
+set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")
add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")
add_library(_flatbuffers INTERFACE)
-if(USE_STATIC_LIBRARIES)
-target_link_libraries(_flatbuffers INTERFACE flatbuffers)
-else()
-target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
-endif()
+target_link_libraries(_flatbuffers INTERFACE flatbuffers)
target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})
# === hdfs

contrib/aws vendored

@@ -1 +1 @@
-Subproject commit 00b03604543367d7e310cb0993973fdcb723ea79
+Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252

contrib/aws-c-auth vendored Submodule

@@ -0,0 +1 @@
Subproject commit 30df6c407e2df43bd244e2c34c9b4a4b87372bfb

contrib/aws-c-cal vendored Submodule

@@ -0,0 +1 @@
Subproject commit 85dd7664b786a389c6fb1a6f031ab4bb2282133d

@@ -1 +1 @@
-Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857
+Subproject commit 324fd1d973ccb25c813aa747bf1759cfde5121c5

contrib/aws-c-compression vendored Submodule

@@ -0,0 +1 @@
Subproject commit b517b7decd0dac30be2162f5186c250221c53aff

@@ -1 +1 @@
-Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0
+Subproject commit 39bfa94a14b7126bf0c1330286ef8db452d87e66

contrib/aws-c-http vendored Submodule

@@ -0,0 +1 @@
Subproject commit 2c5a2a7d5556600b9782ffa6c9d7e09964df1abc

contrib/aws-c-io vendored Submodule

@@ -0,0 +1 @@
Subproject commit 5d32c453560d0823df521a686bf7fbacde7f9be3

contrib/aws-c-mqtt vendored Submodule

@@ -0,0 +1 @@
Subproject commit 882c689561a3db1466330ccfe3b63637e0a575d3

contrib/aws-c-s3 vendored Submodule

@@ -0,0 +1 @@
Subproject commit a41255ece72a7c887bba7f9d998ca3e14f4c8a1b

contrib/aws-c-sdkutils vendored Submodule

@@ -0,0 +1 @@
Subproject commit 25bf5cf225f977c3accc6a05a0a7a181ef2a4a30

@@ -1 +1 @@
-Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65
+Subproject commit 48e7c0e01479232f225c8044d76c84e74192889d


@@ -0,0 +1,114 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCSourceRuns)
option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)
# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
# work-around, disable avx2 (and all other extensions) in mingw builds.
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
#
if (MINGW)
message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
set(USE_CPU_EXTENSIONS OFF)
endif()
if(NOT CMAKE_CROSSCOMPILING)
check_c_source_runs("
#include <stdbool.h>
bool foo(int a, int b, int *c) {
return __builtin_mul_overflow(a, b, c);
}
int main() {
int out;
if (foo(1, 2, &out)) {
return 0;
}
return 0;
}" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
if (USE_CPU_EXTENSIONS)
check_c_source_runs("
int main() {
int foo = 42;
_mulx_u32(1, 2, &foo);
return foo != 2;
}" AWS_HAVE_MSVC_MULX)
endif()
endif()
check_c_source_compiles("
#include <Windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
int main() {
return 0;
}
#else
it's not windows desktop
#endif
" AWS_HAVE_WINAPI_DESKTOP)
check_c_source_compiles("
int main() {
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
# error \"not intel\"
#endif
return 0;
}
" AWS_ARCH_INTEL)
check_c_source_compiles("
int main() {
#if !(defined(__aarch64__) || defined(_M_ARM64))
# error \"not arm64\"
#endif
return 0;
}
" AWS_ARCH_ARM64)
check_c_source_compiles("
int main() {
#if !(defined(__arm__) || defined(_M_ARM))
# error \"not arm\"
#endif
return 0;
}
" AWS_ARCH_ARM32)
check_c_source_compiles("
int main() {
int foo = 42, bar = 24;
__asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
}" AWS_HAVE_GCC_INLINE_ASM)
check_c_source_compiles("
#include <sys/auxv.h>
int main() {
#ifdef __linux__
getauxval(AT_HWCAP);
getauxval(AT_HWCAP2);
#endif
return 0;
}" AWS_HAVE_AUXV)
string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
check_c_source_compiles("
#include <execinfo.h>
int main() {
backtrace(NULL, 0);
return 0;
}" AWS_HAVE_EXECINFO)
endif()
check_c_source_compiles("
#include <linux/if_link.h>
int main() {
return 1;
}" AWS_HAVE_LINUX_IF_LINK_H)


@@ -0,0 +1,74 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCCompilerFlag)
include(CheckIncludeFile)
if (USE_CPU_EXTENSIONS)
if (MSVC)
check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "/arch:AVX2")
endif()
else()
check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "-mavx -mavx2")
endif()
endif()
cmake_push_check_state()
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")
check_c_source_compiles("
#include <immintrin.h>
#include <emmintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
_mm256_shuffle_epi8(vec, vec);
_mm256_set_epi32(1,2,3,4,5,6,7,8);
_mm256_permutevar8x32_epi32(vec, vec);
return 0;
}" HAVE_AVX2_INTRINSICS)
check_c_source_compiles("
#include <immintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
return (int)_mm256_extract_epi64(vec, 2);
}" HAVE_MM256_EXTRACT_EPI64)
cmake_pop_check_state()
endif() # USE_CPU_EXTENSIONS
macro(simd_add_definition_if target definition)
if(${definition})
target_compile_definitions(${target} PRIVATE -D${definition})
endif(${definition})
endmacro(simd_add_definition_if)
# Configure private preprocessor definitions for SIMD-related features
# Does not set any processor feature codegen flags
function(simd_add_definitions target)
simd_add_definition_if(${target} HAVE_AVX2_INTRINSICS)
simd_add_definition_if(${target} HAVE_MM256_EXTRACT_EPI64)
endfunction(simd_add_definitions)
# Adds source files only if AVX2 is supported. These files will be built with
# avx2 intrinsics enabled.
# Usage: simd_add_source_avx2(target file1.c file2.c ...)
function(simd_add_source_avx2 target)
foreach(file ${ARGN})
target_sources(${target} PRIVATE ${file})
set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endforeach()
endfunction(simd_add_source_avx2)
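
The two entry points above are meant to be called from a target's build file; a short usage sketch, where my_target and source/encoding_avx2.c are placeholders (the real call site is aws-cmake/CMakeLists.txt below, which applies ${AVX2_CFLAGS} to aws-c-common's encoding_avx2.c directly):

    simd_add_definitions(my_target)                          # adds -DHAVE_AVX2_INTRINSICS etc. when detected
    simd_add_source_avx2(my_target source/encoding_avx2.c)   # compiles just this file with ${AVX2_CFLAGS}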


@@ -0,0 +1,50 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check if the platform supports setting thread affinity
# (important for hitting full NIC entitlement on NUMA architectures)
function(aws_set_thread_affinity_method target)
# Non-POSIX, Android, and Apple platforms do not support thread affinity.
if (NOT UNIX OR ANDROID OR APPLE)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
set(headers "pthread.h")
# BSDs put nonportable pthread declarations in a separate header.
if(CMAKE_SYSTEM_NAME MATCHES BSD)
set(headers "${headers};pthread_np.h")
endif()
# Using pthread attrs is the preferred method, but is glibc-specific.
check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
if (USE_PTHREAD_ATTR_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
return()
endif()
# This method is still nonportable, but is supported by musl and BSDs.
check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
if (USE_PTHREAD_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
return()
endif()
# If we got here, we expected thread affinity support but didn't find it.
# We still build with degraded NUMA performance, but show a warning.
message(WARNING "No supported method for setting thread affinity")
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
cmake_pop_check_state()
endfunction()
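
Usage is a single call per target; a sketch against the _aws target that aws-cmake/CMakeLists.txt below actually passes in. On a glibc system the pthread-attrs branch above typically wins:

    aws_set_thread_affinity_method(_aws)   # e.g. defines AWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR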


@@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check how the platform supports setting thread name
function(aws_set_thread_name_method target)
if (WINDOWS)
# On Windows we do a runtime check, instead of compile-time check
return()
elseif (APPLE)
# All Apple platforms we support have the same function, so no need for compile-time check.
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
# The start of the test program
set(c_source_start "
#define _GNU_SOURCE
#include <pthread.h>
#if defined(__FreeBSD__) || defined(__NETBSD__)
#include <pthread_np.h>
#endif
int main() {
pthread_t thread_id;
")
# The end of the test program
set(c_source_end "}")
# pthread_setname_np() usually takes 2 args
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\");
${c_source_end}"
PTHREAD_SETNAME_TAKES_2ARGS)
if (PTHREAD_SETNAME_TAKES_2ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
return()
endif()
# But on NetBSD it takes 3!
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\", NULL);
${c_source_end}
" PTHREAD_SETNAME_TAKES_3ARGS)
if (PTHREAD_SETNAME_TAKES_3ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
return()
endif()
# And on many older/weirder platforms it's just not supported
cmake_pop_check_state()
endfunction()
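
Same calling convention as the affinity helper; a sketch of the expected outcome on Linux/glibc, where the two-argument pthread_setname_np form compiles:

    aws_set_thread_name_method(_aws)       # e.g. defines AWS_PTHREAD_SETNAME_TAKES_2ARGS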


@@ -0,0 +1,376 @@
set(ENABLE_AWS_S3_DEFAULT OFF)
if(ENABLE_LIBRARIES AND (OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::Crypto)
set(ENABLE_AWS_S3_DEFAULT ON)
endif()
option(ENABLE_AWS_S3 "Enable AWS S3" ${ENABLE_AWS_S3_DEFAULT})
if(ENABLE_AWS_S3)
if(NOT TARGET OpenSSL::Crypto)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK without OpenSSL")
elseif(NOT (OS_LINUX OR OS_DARWIN))
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK with platform ${CMAKE_SYSTEM_NAME}")
endif()
endif()
if(NOT ENABLE_AWS_S3)
message(STATUS "Not using AWS S3")
return()
endif()
# Utilities.
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsFeatureTests.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadAffinity.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadName.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsSIMD.cmake")
# Gather sources and options.
set(AWS_SOURCES)
set(AWS_PUBLIC_INCLUDES)
set(AWS_PRIVATE_INCLUDES)
set(AWS_PUBLIC_COMPILE_DEFS)
set(AWS_PRIVATE_COMPILE_DEFS)
set(AWS_PRIVATE_LIBS)
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DDEBUG_BUILD")
endif()
set(ENABLE_OPENSSL_ENCRYPTION ON)
if (ENABLE_OPENSSL_ENCRYPTION)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DENABLE_OPENSSL_ENCRYPTION")
endif()
set(USE_S2N ON)
if (USE_S2N)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_S2N")
endif()
# Directories.
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-s3")
SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
SET(AWS_CHECKSUMS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_COMPRESSION_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-compression")
SET(AWS_CRT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-crt-cpp")
SET(AWS_EVENT_STREAM_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
SET(AWS_HTTP_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-http")
SET(AWS_IO_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-io")
SET(AWS_MQTT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-mqtt")
SET(AWS_S2N_TLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-s2n-tls")
SET(AWS_S3_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-s3")
SET(AWS_SDKUTILS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-sdkutils")
# aws-cpp-sdk-core
file(GLOB AWS_SDK_CORE_SRC
"${AWS_SDK_CORE_DIR}/source/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/bearer-token-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/client/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/defaults/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/cjson/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/standard/*.cpp"
"${AWS_SDK_CORE_DIR}/source/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/monitoring/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/base64/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/event/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/json/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/logging/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/stream/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/threading/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/xml/*.cpp"
)
if(OS_LINUX OR OS_DARWIN)
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/linux-shared/*.cpp")
file(GLOB AWS_SDK_CORE_PLATFORM_SRC "${AWS_SDK_CORE_DIR}/source/platform/linux-shared/*.cpp")
else()
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/*.cpp")
set(AWS_SDK_CORE_PLATFORM_SRC)
endif()
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MAJOR=1")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MINOR=10")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_PATCH=36")
list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})
list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_SDK_CORE_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
# aws-cpp-sdk-s3
file(GLOB AWS_SDK_S3_SRC
"${AWS_SDK_S3_DIR}/source/*.cpp"
"${AWS_SDK_S3_DIR}/source/model/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")
# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_AUTH_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_AUTH_DIR}/include/")
# aws-c-cal
file(GLOB AWS_CAL_SRC
"${AWS_CAL_DIR}/source/*.c"
)
if (ENABLE_OPENSSL_ENCRYPTION)
file(GLOB AWS_CAL_OS_SRC
"${AWS_CAL_DIR}/source/unix/*.c"
)
list(APPEND AWS_PRIVATE_LIBS OpenSSL::Crypto)
endif()
list(APPEND AWS_SOURCES ${AWS_CAL_SRC} ${AWS_CAL_OS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CAL_DIR}/include/")
# aws-c-event-stream
file(GLOB AWS_EVENT_STREAM_SRC
"${AWS_EVENT_STREAM_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_EVENT_STREAM_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_EVENT_STREAM_DIR}/include/")
# aws-c-common
file(GLOB AWS_COMMON_SRC
"${AWS_COMMON_DIR}/source/*.c"
"${AWS_COMMON_DIR}/source/external/*.c"
"${AWS_COMMON_DIR}/source/posix/*.c"
)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/generic/*.c"
)
if (AWS_ARCH_INTEL)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/intel/cpuid.c"
"${AWS_COMMON_DIR}/source/arch/intel/asm/*.c"
)
elseif (AWS_ARCH_ARM64 OR AWS_ARCH_ARM32)
if (AWS_HAVE_AUXV)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/arm/asm/*.c"
)
endif()
endif()
set(AWS_COMMON_AVX2_SRC)
if (HAVE_AVX2_INTRINSICS)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_SIMD_ENCODING")
set(AWS_COMMON_AVX2_SRC "${AWS_COMMON_DIR}/source/arch/intel/encoding_avx2.c")
set_source_files_properties(${AWS_COMMON_AVX2_SRC} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endif()
configure_file("${AWS_COMMON_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
list(APPEND AWS_SOURCES ${AWS_COMMON_SRC} ${AWS_COMMON_ARCH_SRC} ${AWS_COMMON_AVX2_SRC})
list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_COMMON_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
# aws-checksums
file(GLOB AWS_CHECKSUMS_SRC
"${AWS_CHECKSUMS_DIR}/source/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
if(AWS_ARCH_INTEL AND AWS_HAVE_GCC_INLINE_ASM)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
)
endif()
if (AWS_ARCH_ARM64)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
set_source_files_properties("${AWS_CHECKSUMS_DIR}/source/arm/crc32c_arm.c" PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
elseif (AWS_ARCH_ARM32)
if (AWS_ARM32_CRC)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/asm/*.c"
)
set_source_files_properties(source/arm/crc32c_arm.c PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
endif()
endif()
list(APPEND AWS_SOURCES ${AWS_CHECKSUMS_SRC} ${AWS_CHECKSUMS_ARCH_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CHECKSUMS_DIR}/include/")
# aws-c-io
file(GLOB AWS_IO_SRC
"${AWS_IO_DIR}/source/*.c"
)
if (OS_LINUX)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/linux/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
elseif (OS_DARWIN)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/bsd/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
endif()
set(AWS_IO_TLS_SRC)
if (USE_S2N)
file(GLOB AWS_IO_TLS_SRC
"${AWS_IO_DIR}/source/s2n/*.c"
)
endif()
list(APPEND AWS_SOURCES ${AWS_IO_SRC} ${AWS_IO_OS_SRC} ${AWS_IO_TLS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_IO_DIR}/include/")
# aws-s2n-tls
if (USE_S2N)
file(GLOB AWS_S2N_TLS_SRC
"${AWS_S2N_TLS_DIR}/crypto/*.c"
"${AWS_S2N_TLS_DIR}/error/*.c"
"${AWS_S2N_TLS_DIR}/stuffer/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/kyber_r3/*.c"
"${AWS_S2N_TLS_DIR}/tls/*.c"
"${AWS_S2N_TLS_DIR}/tls/extensions/*.c"
"${AWS_S2N_TLS_DIR}/utils/*.c"
)
list(APPEND AWS_SOURCES ${AWS_S2N_TLS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES
"${AWS_S2N_TLS_DIR}/"
"${AWS_S2N_TLS_DIR}/api/"
)
endif()
# aws-crt-cpp
file(GLOB AWS_CRT_SRC
"${AWS_CRT_DIR}/source/*.cpp"
"${AWS_CRT_DIR}/source/auth/*.cpp"
"${AWS_CRT_DIR}/source/crypto/*.cpp"
"${AWS_CRT_DIR}/source/endpoints/*.cpp"
"${AWS_CRT_DIR}/source/external/*.cpp"
"${AWS_CRT_DIR}/source/http/*.cpp"
"${AWS_CRT_DIR}/source/io/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_CRT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_CRT_DIR}/include/")
# aws-c-mqtt
file(GLOB AWS_MQTT_SRC
"${AWS_MQTT_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_MQTT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_MQTT_DIR}/include/")
# aws-c-http
file(GLOB AWS_HTTP_SRC
"${AWS_HTTP_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_HTTP_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_HTTP_DIR}/include/")
# aws-c-compression
file(GLOB AWS_COMPRESSION_SRC
"${AWS_COMPRESSION_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_COMPRESSION_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_COMPRESSION_DIR}/include/")
# aws-c-s3
file(GLOB AWS_S3_SRC
"${AWS_S3_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_S3_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_S3_DIR}/include/")
# aws-c-sdkutils
file(GLOB AWS_SDKUTILS_SRC
"${AWS_SDKUTILS_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_SDKUTILS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDKUTILS_DIR}/include/")
# Add library.
add_library(_aws ${AWS_SOURCES})
target_include_directories(_aws SYSTEM BEFORE PUBLIC ${AWS_PUBLIC_INCLUDES})
target_include_directories(_aws SYSTEM BEFORE PRIVATE ${AWS_PRIVATE_INCLUDES})
target_compile_definitions(_aws PUBLIC ${AWS_PUBLIC_COMPILE_DEFS})
target_compile_definitions(_aws PRIVATE ${AWS_PRIVATE_COMPILE_DEFS})
target_link_libraries(_aws PRIVATE ${AWS_PRIVATE_LIBS})
aws_set_thread_affinity_method(_aws)
aws_set_thread_name_method(_aws)
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws PRIVATE -g0)
endif()
add_library(ch_contrib::aws_s3 ALIAS _aws)
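
Note that the unified _aws target is exported under the same ch_contrib::aws_s3 alias that the deleted aws-s3-cmake build (below) exported for _aws_s3, so dependents keep linking the name they already use. A hypothetical consumer:

    target_link_libraries(my_consumer PRIVATE ch_contrib::aws_s3)   # my_consumer is a placeholder target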

contrib/aws-crt-cpp vendored Submodule

@@ -0,0 +1 @@
Subproject commit ec0bea288f451d884c0d80d534bc5c66241c39a4

contrib/aws-s2n-tls vendored Submodule

@@ -0,0 +1 @@
Subproject commit 15d534e8a9ca1eda6bacee514e37d08b4f38a526


@@ -1,122 +0,0 @@
if(NOT OS_FREEBSD)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
elseif(ENABLE_S3)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on FreeBSD")
endif()
if(NOT ENABLE_S3)
message(STATUS "Not using S3")
return()
endif()
SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core")
SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_EVENT_STREAM_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
file(GLOB AWS_CORE_SOURCES
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
)
file(GLOB AWS_S3_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
)
file(GLOB AWS_S3_MODEL_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
)
file(GLOB AWS_EVENT_STREAM_SOURCES
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
)
file(GLOB AWS_COMMON_SOURCES
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
)
file(GLOB AWS_CHECKSUMS_SOURCES
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
)
file(GLOB S3_UNIFIED_SRC
${AWS_EVENT_STREAM_SOURCES}
${AWS_COMMON_SOURCES}
${AWS_S3_SOURCES}
${AWS_S3_MODEL_SOURCES}
${AWS_CORE_SOURCES}
)
set(S3_INCLUDES
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
"${AWS_CORE_LIBRARY_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include/"
)
add_library(_aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(_aws_s3_checksums SYSTEM PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
target_compile_definitions(_aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
set_target_properties(_aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
set_property(TARGET _aws_s3_checksums PROPERTY C_STANDARD 99)
add_library(_aws_s3 ${S3_UNIFIED_SRC})
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(_aws_s3 SYSTEM BEFORE PUBLIC ${S3_INCLUDES})
if (TARGET OpenSSL::SSL)
target_compile_definitions(_aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
target_link_libraries(_aws_s3 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()
target_link_libraries(_aws_s3 PRIVATE _aws_s3_checksums)
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws_s3 PRIVATE -g0)
target_compile_options (_aws_s3_checksums PRIVATE -g0)
endif()
add_library(ch_contrib::aws_s3 ALIAS _aws_s3)


@@ -139,13 +139,6 @@ if(NOT OPENSSL_NO_ASM)
endif()
endif()
if(BUILD_SHARED_LIBS)
add_definitions(-DBORINGSSL_SHARED_LIBRARY)
# Enable position-independent code globally. This is needed because
# some library targets are OBJECT libraries.
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
endif()
set(
CRYPTO_ios_aarch64_SOURCES


@@ -63,13 +63,8 @@ SET(SRCS
"${LIBRARY_DIR}/src/lib/windows_port.c"
)
-if (USE_STATIC_LIBRARIES)
-add_library(_c-ares STATIC ${SRCS})
-target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)
-else()
-add_library(_c-ares SHARED ${SRCS})
-target_compile_definitions(_c-ares PUBLIC CARES_BUILDING_LIBRARY)
-endif()
+add_library(_c-ares STATIC ${SRCS})
+target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)
target_compile_definitions(_c-ares PRIVATE HAVE_CONFIG_H=1)


@@ -136,11 +136,6 @@ add_library(ch_contrib::uv ALIAS _uv)
target_compile_definitions(_uv PRIVATE ${uv_defines})
target_include_directories(_uv SYSTEM PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(_uv ${uv_libraries})
if (NOT USE_STATIC_LIBRARIES)
target_compile_definitions(_uv
INTERFACE USING_UV_SHARED=1
PRIVATE BUILDING_UV_SHARED=1)
endif()
if(UNIX)
# Now for some gibbering horrors from beyond the stars...


@@ -6,8 +6,6 @@ endif()
option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})
# If USE_STATIC_LIBRARIES=0 was passed to CMake, we'll still build LLVM statically to keep complexity minimal.
if (NOT ENABLE_EMBEDDED_COMPILER)
message(STATUS "Not using LLVM")
return()


@@ -1,4 +1,4 @@
-if (NOT OS_FREEBSD AND NOT SPLIT_SHARED_LIBRARIES AND NOT (OS_DARWIN AND COMPILER_CLANG))
+if (NOT OS_FREEBSD AND NOT (OS_DARWIN AND COMPILER_CLANG))
option (ENABLE_SENTRY "Enable Sentry" ${ENABLE_LIBRARIES})
else()
option (ENABLE_SENTRY "Enable Sentry" OFF)
@@ -51,11 +51,7 @@ endif()
add_library(_sentry ${SRCS})
-if(BUILD_SHARED_LIBS)
-target_compile_definitions(_sentry PRIVATE SENTRY_BUILD_SHARED)
-else()
-target_compile_definitions(_sentry PUBLIC SENTRY_BUILD_STATIC)
-endif()
+target_compile_definitions(_sentry PUBLIC SENTRY_BUILD_STATIC)
target_link_libraries(_sentry PRIVATE ch_contrib::curl pthread)
target_include_directories(_sentry PUBLIC "${SRC_DIR}/include" PRIVATE "${SRC_DIR}/src")

contrib/sysroot vendored

@@ -1 +1 @@
-Subproject commit 0f41651860fa4a530ecd68b93a15b8fd77397adf
+Subproject commit f0081b2649b94837855f3bc7d05ef326b100bad8


@@ -2,7 +2,6 @@
"docker/packager/binary": {
"name": "clickhouse/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/codebrowser"
]
},
@@ -55,10 +54,6 @@
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/split_build_smoke_test": {
"name": "clickhouse/split-build-smoke-test",
"dependent": []
},
"docker/test/codebrowser": {
"name": "clickhouse/codebrowser",
"dependent": []


@@ -108,11 +108,6 @@ mv ./programs/clickhouse* /output
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds
# Exclude cargo build directory since it may have some shared libraries
# (even though they are not required for the clickhouse binary)
find . -name '*.so' -not -path '*/cargo/*' -print -exec mv '{}' /output \;
find . -name '*.so.*' -not -path '*/cargo/*' -print -exec mv '{}' /output \;
prepare_combined_output () {
local OUTPUT
OUTPUT="$1"
@@ -168,7 +163,7 @@ then
)
fi
-# May be set for split build or for performance test.
+# May be set for performance test.
if [ "" != "$COMBINED_OUTPUT" ]
then
prepare_combined_output /output


@@ -100,12 +100,11 @@ def run_docker_image_with_env(
subprocess.check_call(cmd, shell=True)
-def is_release_build(build_type, package_type, sanitizer, shared_libraries):
+def is_release_build(build_type, package_type, sanitizer):
return (
build_type == ""
and package_type == "deb"
and sanitizer == ""
-and not shared_libraries
)
@@ -116,7 +115,6 @@ def parse_env_variables(
package_type,
cache,
distcc_hosts,
shared_libraries,
clang_tidy,
version,
author,
@@ -218,7 +216,7 @@ def parse_env_variables(
cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
-if is_release_build(build_type, package_type, sanitizer, shared_libraries):
+if is_release_build(build_type, package_type, sanitizer):
cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
result.append("WITH_PERFORMANCE=1")
if is_cross_arm:
@@ -231,12 +229,10 @@ def parse_env_variables(
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")
-# Create combined output archive for shared library build and for performance tests.
+# Create combined output archive for performance tests.
if package_type == "coverity":
result.append("COMBINED_OUTPUT=coverity")
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
-elif shared_libraries:
-result.append("COMBINED_OUTPUT=shared_build")
if sanitizer:
result.append(f"SANITIZER={sanitizer}")
@@ -285,15 +281,6 @@ def parse_env_variables(
result.append("BINARY_OUTPUT=tests")
cmake_flags.append("-DENABLE_TESTS=1")
if shared_libraries:
cmake_flags.append("-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1")
# We can't always build utils because it requires too much space, but
# we have to build them at least in some way in CI. The shared library
# build is probably the least heavy disk-wise.
cmake_flags.append("-DENABLE_UTILS=1")
# utils are not included into clickhouse-bundle, so build everything
build_target = "all"
if clang_tidy:
cmake_flags.append("-DENABLE_CLANG_TIDY=1")
cmake_flags.append("-DENABLE_TESTS=1")
@@ -371,7 +358,6 @@ if __name__ == "__main__":
default="",
)
parser.add_argument("--shared-libraries", action="store_true")
parser.add_argument("--clang-tidy", action="store_true")
parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
parser.add_argument(
@@ -424,7 +410,6 @@ if __name__ == "__main__":
args.package_type,
args.cache,
args.distcc_hosts,
args.shared_libraries,
args.clang_tidy,
args.version,
args.author,


@@ -267,7 +267,7 @@ quit
         echo "Lost connection to server. See the logs." > description.txt
     fi

-    if grep -F --text 'Sanitizer: out-of-memory' description.txt
+    if grep -E --text 'Sanitizer: (out-of-memory|failed to allocate)' description.txt
     then
         # OOM of sanitizer is not a problem we can handle - treat it as success, but preserve the description.
         task_exit_code=0


@@ -83,6 +83,7 @@ RUN python3 -m pip install \
     pytest \
     pytest-order==1.0.0 \
     pytest-timeout \
+    pytest-random \
     pytest-xdist \
     pytest-repeat \
     pytz \


@@ -297,6 +297,7 @@ if not args.use_existing_tables:
     # Let's sync the data to avoid writeback affects performance
     os.system("sync")
+    reportStageEnd("sync")

 # By default, test all queries.
 queries_to_run = range(0, len(test_queries))


@@ -1,9 +0,0 @@
-# rebuild in #33610
-# docker build -t clickhouse/split-build-smoke-test .
-ARG FROM_TAG=latest
-FROM clickhouse/binary-builder:$FROM_TAG
-
-COPY run.sh /run.sh
-COPY process_split_build_smoke_test_result.py /
-
-CMD /run.sh


@@ -1,64 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import logging
-import argparse
-import csv
-
-RESULT_LOG_NAME = "run.log"
-
-
-def process_result(result_folder):
-    status = "success"
-    description = "Server started and responded"
-    summary = [("Smoke test", "OK")]
-    with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log:
-        lines = run_log.read().split("\n")
-        if not lines or lines[0].strip() != "OK":
-            status = "failure"
-            logging.info("Lines is not ok: %s", str("\n".join(lines)))
-            summary = [("Smoke test", "FAIL")]
-            description = "Server failed to respond, see result in logs"
-
-    result_logs = []
-    server_log_path = os.path.join(result_folder, "clickhouse-server.log")
-    stderr_log_path = os.path.join(result_folder, "stderr.log")
-    client_stderr_log_path = os.path.join(result_folder, "clientstderr.log")
-
-    if os.path.exists(server_log_path):
-        result_logs.append(server_log_path)
-
-    if os.path.exists(stderr_log_path):
-        result_logs.append(stderr_log_path)
-
-    if os.path.exists(client_stderr_log_path):
-        result_logs.append(client_stderr_log_path)
-
-    return status, description, summary, result_logs
-
-
-def write_results(results_file, status_file, results, status):
-    with open(results_file, "w") as f:
-        out = csv.writer(f, delimiter="\t")
-        out.writerows(results)
-    with open(status_file, "w") as f:
-        out = csv.writer(f, delimiter="\t")
-        out.writerow(status)
-
-
-if __name__ == "__main__":
-    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
-    parser = argparse.ArgumentParser(
-        description="ClickHouse script for parsing results of split build smoke test"
-    )
-    parser.add_argument("--in-results-dir", default="/test_output/")
-    parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
-    parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
-    args = parser.parse_args()
-
-    state, description, test_results, logs = process_result(args.in_results_dir)
-    logging.info("Result parsed")
-    status = (state, description)
-    write_results(args.out_results_file, args.out_status_file, test_results, status)
-    logging.info("Result written")


@@ -1,22 +0,0 @@
-#!/bin/bash
-set -x
-
-install_and_run_server() {
-    mkdir /unpacked
-    tar -xzf /package_folder/shared_build.tgz -C /unpacked --strip 1
-    LD_LIBRARY_PATH=/unpacked /unpacked/clickhouse-server --config /unpacked/config/config.xml >/test_output/stderr.log 2>&1 &
-}
-
-run_client() {
-    for i in {1..100}; do
-        sleep 1
-        LD_LIBRARY_PATH=/unpacked /unpacked/clickhouse-client --query "select 'OK'" > /test_output/run.log 2> /test_output/clientstderr.log && break
-        [[ $i == 100 ]] && echo 'FAIL'
-    done
-}
-
-install_and_run_server
-run_client
-mv /var/log/clickhouse-server/clickhouse-server.log /test_output/clickhouse-server.log
-/process_split_build_smoke_test_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv


@@ -1,90 +1,151 @@
 #!/bin/bash
-USAGE='Usage for local run:
-
-./docker/test/stateless/setup_minio.sh { stateful | stateless } ./tests/
-
-'
-
-set -e -x -a -u
-
-TEST_TYPE="$1"
-shift
-
-case $TEST_TYPE in
-  stateless) QUERY_DIR=0_stateless ;;
-  stateful) QUERY_DIR=1_stateful ;;
-  *) echo "unknown test type $TEST_TYPE"; echo "${USAGE}"; exit 1 ;;
-esac
-
-ls -lha
-
-mkdir -p ./minio_data
-
-if [ ! -f ./minio ]; then
-  MINIO_SERVER_VERSION=${MINIO_SERVER_VERSION:-2022-09-07T22-25-02Z}
-  MINIO_CLIENT_VERSION=${MINIO_CLIENT_VERSION:-2022-08-28T20-08-11Z}
-  case $(uname -m) in
-    x86_64) BIN_ARCH=amd64 ;;
-    aarch64) BIN_ARCH=arm64 ;;
-    *) echo "unknown architecture $(uname -m)"; exit 1 ;;
-  esac
-  echo 'MinIO binary not found, downloading...'
-
-  BINARY_TYPE=$(uname -s | tr '[:upper:]' '[:lower:]')
-
-  wget "https://dl.min.io/server/minio/release/${BINARY_TYPE}-${BIN_ARCH}/archive/minio.RELEASE.${MINIO_SERVER_VERSION}" -O ./minio \
-    && wget "https://dl.min.io/client/mc/release/${BINARY_TYPE}-${BIN_ARCH}/archive/mc.RELEASE.${MINIO_CLIENT_VERSION}" -O ./mc \
-    && chmod +x ./mc ./minio
-fi
-
-MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
-MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
-
-./minio --version
-./minio server --address ":11111" ./minio_data &
-
-i=0
-while ! curl -v --silent http://localhost:11111 2>&1 | grep AccessDenied
-do
-  if [[ $i == 60 ]]; then
-    echo "Failed to setup minio"
-    exit 0
-  fi
-  echo "Trying to connect to minio"
-  sleep 1
-  i=$((i + 1))
-done
-
-lsof -i :11111
-
-sleep 5
-
-./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
-./mc admin user add clickminio test testtest
-./mc admin policy set clickminio readwrite user=test
-./mc mb clickminio/test
-if [ "$TEST_TYPE" = "stateless" ]; then
-  ./mc policy set public clickminio/test
-fi
-
-# Upload data to Minio. By default after unpacking all tests will in
-# /usr/share/clickhouse-test/queries
-
-TEST_PATH=${1:-/usr/share/clickhouse-test}
-MINIO_DATA_PATH=${TEST_PATH}/queries/${QUERY_DIR}/data_minio
-
-# Iterating over globs will cause redundant FILE variable to be a path to a file, not a filename
-# shellcheck disable=SC2045
-for FILE in $(ls "${MINIO_DATA_PATH}"); do
-    echo "$FILE";
-    ./mc cp "${MINIO_DATA_PATH}"/"$FILE" clickminio/test/"$FILE";
-done
-
-mkdir -p ~/.aws
-cat <<EOT >> ~/.aws/credentials
-[default]
-aws_access_key_id=${MINIO_ROOT_USER}
-aws_secret_access_key=${MINIO_ROOT_PASSWORD}
-EOT
+
+set -euxf -o pipefail
+
+export MINIO_ROOT_USER=${MINIO_ROOT_USER:-clickhouse}
+export MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD:-clickhouse}
+
+usage() {
+    echo $"Usage: $0 <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
+    exit 1
+}
+
+check_arg() {
+    local query_dir
+    if [ ! $# -eq 1 ]; then
+        if [ ! $# -eq 2 ]; then
+            echo "ERROR: need either one or two arguments, <stateful|stateless> <test_path> (default path: /usr/share/clickhouse-test)"
+            usage
+        fi
+    fi
+    case "$1" in
+        stateless)
+            query_dir="0_stateless"
+            ;;
+        stateful)
+            query_dir="1_stateful"
+            ;;
+        *)
+            echo "unknown test type ${test_type}"
+            usage
+            ;;
+    esac
+    echo ${query_dir}
+}
+
+find_arch() {
+    local arch
+    case $(uname -m) in
+        x86_64)
+            arch="amd64"
+            ;;
+        aarch64)
+            arch="arm64"
+            ;;
+        *)
+            echo "unknown architecture $(uname -m)";
+            exit 1
+            ;;
+    esac
+    echo ${arch}
+}
+
+find_os() {
+    local os
+    os=$(uname -s | tr '[:upper:]' '[:lower:]')
+    echo "${os}"
+}
+
+download_minio() {
+    local os
+    local arch
+    local minio_server_version=${MINIO_SERVER_VERSION:-2022-09-07T22-25-02Z}
+    local minio_client_version=${MINIO_CLIENT_VERSION:-2022-08-28T20-08-11Z}
+
+    os=$(find_os)
+    arch=$(find_arch)
+    wget "https://dl.min.io/server/minio/release/${os}-${arch}/archive/minio.RELEASE.${minio_server_version}" -O ./minio
+    wget "https://dl.min.io/client/mc/release/${os}-${arch}/archive/mc.RELEASE.${minio_client_version}" -O ./mc
+    chmod +x ./mc ./minio
+}
+
+start_minio() {
+    mkdir -p ./minio_data
+    ./minio --version
+    ./minio server --address ":11111" ./minio_data &
+    wait_for_it
+    lsof -i :11111
+    sleep 5
+}
+
+setup_minio() {
+    local test_type=$1
+    ./mc alias set clickminio http://localhost:11111 clickhouse clickhouse
+    ./mc admin user add clickminio test testtest
+    ./mc admin policy set clickminio readwrite user=test
+    ./mc mb clickminio/test
+    if [ "$test_type" = "stateless" ]; then
+        ./mc policy set public clickminio/test
+    fi
+}
+
+# uploads data to minio, by default after unpacking all tests
+# will be in /usr/share/clickhouse-test/queries
+upload_data() {
+    local query_dir=$1
+    local test_path=$2
+    local data_path=${test_path}/queries/${query_dir}/data_minio
+
+    # iterating over globs will cause redundant file variable to be
+    # a path to a file, not a filename
+    # shellcheck disable=SC2045
+    for file in $(ls "${data_path}"); do
+        echo "${file}";
+        ./mc cp "${data_path}"/"${file}" clickminio/test/"${file}";
+    done
+}
+
+setup_aws_credentials() {
+    local minio_root_user=${MINIO_ROOT_USER:-clickhouse}
+    local minio_root_password=${MINIO_ROOT_PASSWORD:-clickhouse}
+    mkdir -p ~/.aws
+    cat <<EOT >> ~/.aws/credentials
+[default]
+aws_access_key_id=${minio_root_user}
+aws_secret_access_key=${minio_root_password}
+EOT
+}
+
+wait_for_it() {
+    local counter=0
+    local max_counter=60
+    local url="http://localhost:11111"
+    local params=(
+        --silent
+        --verbose
+    )
+    while ! curl "${params[@]}" "${url}" 2>&1 | grep AccessDenied
+    do
+        if [[ ${counter} == "${max_counter}" ]]; then
+            echo "failed to setup minio"
+            exit 0
+        fi
+        echo "trying to connect to minio"
+        sleep 1
+        counter=$((counter + 1))
+    done
+}
+
+main() {
+    local query_dir
+    query_dir=$(check_arg "$@")
+
+    if [ ! -f ./minio ]; then
+        download_minio
+    fi
+    start_minio
+    setup_minio "$1"
+    upload_data "${query_dir}" "${2:-/usr/share/clickhouse-test}"
+    setup_aws_credentials
+}
+
+main "$@"


@@ -53,6 +53,7 @@ function configure()
     local total_mem
     total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
     total_mem=$(( total_mem*1024 )) # bytes
+
     # Set maximum memory usage as half of total memory (less chance of OOM).
     #
     # But not via max_server_memory_usage but via max_memory_usage_for_user,
@@ -65,16 +66,17 @@ function configure()
     # max_server_memory_usage will be hard limit, and queries that should be
     # executed regardless memory limits will use max_memory_usage_for_user=0,
     # instead of relying on max_untracked_memory
-    local max_server_mem
-    max_server_mem=$((total_mem*75/100)) # 75%
-    echo "Setting max_server_memory_usage=$max_server_mem"
+
+    max_server_memory_usage_to_ram_ratio=0.5
+    echo "Setting max_server_memory_usage_to_ram_ratio to ${max_server_memory_usage_to_ram_ratio}"
     cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
 <clickhouse>
-    <max_server_memory_usage>${max_server_mem}</max_server_memory_usage>
+    <max_server_memory_usage_to_ram_ratio>${max_server_memory_usage_to_ram_ratio}</max_server_memory_usage_to_ram_ratio>
 </clickhouse>
 EOL
+
     local max_users_mem
-    max_users_mem=$((total_mem*50/100)) # 50%
+    max_users_mem=$((total_mem*30/100)) # 30%
     echo "Setting max_memory_usage_for_user=$max_users_mem"
     cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
 <clickhouse>
@@ -97,6 +99,13 @@ EOL
     -->
     <core_path>$PWD</core_path>
 </clickhouse>
 EOL
+
+    # Let OOM killer terminate other processes before clickhouse-server:
+    cat > /etc/clickhouse-server/config.d/oom_score.xml <<EOL
+<clickhouse>
+    <oom_score>-1000</oom_score>
+</clickhouse>
+EOL

     # Analyzer is not yet ready for testing


@@ -118,7 +118,6 @@ Builds ClickHouse in various configurations for use in further steps. You have t
 - **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`).
 - **Build type**: `Debug` or `RelWithDebInfo` (cmake).
 - **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
-- **Split** `splitted` is a [split build](../development/build.md#split-build)
 - **Status**: `success` or `fail`
 - **Build log**: link to the building and files copying log, useful when build failed.
 - **Build time**.
@@ -130,7 +129,6 @@ Builds ClickHouse in various configurations for use in further steps. You have t
 - `clickhouse`: Main built binary.
 - `clickhouse-odbc-bridge`
 - `unit_tests_dbms`: GoogleTest binary with ClickHouse unit tests.
-- `shared_build.tgz`: build with shared libraries.
 - `performance.tgz`: Special package for performance tests.
@@ -169,16 +167,6 @@ concurrency-related errors. If it fails:
   of error.

-## Split Build Smoke Test
-
-Checks that the server build in [split build](../development/developer-instruction.md#split-build)
-configuration can start and run simple queries. If it fails:
-
-* Fix other test errors first;
-* Build the server in [split build](../development/developer-instruction.md#split-build) configuration
-  locally and check whether it can start and run `select 1`.
-
 ## Compatibility Check

 Checks that `clickhouse` binary runs on distributions with old libc versions. If it fails, ask a maintainer for help.


@@ -9,6 +9,29 @@ slug: /en/operations/backup
 - [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk)
 - [Alternatives](#alternatives)

+## Command summary
+
+```bash
+ BACKUP|RESTORE
+  TABLE [db.]table_name [AS [db.]table_name_in_backup]
+    [PARTITION[S] partition_expr [,...]] |
+  DICTIONARY [db.]dictionary_name [AS [db.]name_in_backup] |
+  DATABASE database_name [AS database_name_in_backup]
+    [EXCEPT TABLES ...] |
+  TEMPORARY TABLE table_name [AS table_name_in_backup] |
+  VIEW view_name [AS view_name_in_backup]
+  ALL TEMPORARY TABLES [EXCEPT ...] |
+  ALL DATABASES [EXCEPT ...] } [,...]
+  [ON CLUSTER 'cluster_name']
+  TO|FROM File('<path>/<filename>') | Disk('<disk_name>', '<path>/') | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')
+  [SETTINGS base_backup = File('<path>/<filename>') | Disk(...) | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')]
+```
+
+:::note ALL
+`ALL` is only applicable to the `RESTORE` command.
+:::
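To make the grammar concrete, here is a minimal sketch (the table name and destination path are hypothetical, not part of the summary above) that backs up one table to a `File` destination and restores it under another name:

``` sql
-- Hypothetical table and destination, following the syntax summary above.
BACKUP TABLE test.table TO File('/backups/test_table/');

-- Restore the same backup into a differently named table.
RESTORE TABLE test.table AS test.table_restored FROM File('/backups/test_table/');
```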
 ## Background

 While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can't just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.


@@ -127,6 +127,13 @@ Default value: 100000.

 A large number of parts in a table reduces performance of ClickHouse queries and increases ClickHouse boot time. Most often this is a consequence of an incorrect design (mistakes when choosing a partitioning strategy - too small partitions).

+## simultaneous_parts_removal_limit {#simultaneous-parts-removal-limit}
+
+If there are a lot of outdated parts, the cleanup thread will try to delete up to `simultaneous_parts_removal_limit` parts during one iteration.
+`simultaneous_parts_removal_limit` set to `0` means unlimited.
+
+Default value: 0.
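To confirm the value in effect on a running server, one option (a sketch; only the setting name comes from this section) is to query the `system.merge_tree_settings` table:

``` sql
-- Shows the current value and whether it differs from the default.
SELECT name, value, changed
FROM system.merge_tree_settings
WHERE name = 'simultaneous_parts_removal_limit';
```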
+
 ## replicated_deduplication_window {#replicated-deduplication-window}

 The number of most recently inserted blocks for which ClickHouse Keeper stores hash sums to check for duplicates.


@@ -410,35 +410,35 @@ Converts a date with time to a certain fixed date, while preserving the time.

 ## toRelativeYearNum

-Converts a date with time or date to the number of the year, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the year, starting from a certain fixed point in the past.

 ## toRelativeQuarterNum

-Converts a date with time or date to the number of the quarter, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the quarter, starting from a certain fixed point in the past.

 ## toRelativeMonthNum

-Converts a date with time or date to the number of the month, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the month, starting from a certain fixed point in the past.

 ## toRelativeWeekNum

-Converts a date with time or date to the number of the week, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the week, starting from a certain fixed point in the past.

 ## toRelativeDayNum

-Converts a date with time or date to the number of the day, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the day, starting from a certain fixed point in the past.

 ## toRelativeHourNum

-Converts a date with time or date to the number of the hour, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the hour, starting from a certain fixed point in the past.

 ## toRelativeMinuteNum

-Converts a date with time or date to the number of the minute, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the minute, starting from a certain fixed point in the past.

 ## toRelativeSecondNum

-Converts a date with time or date to the number of the second, starting from a certain fixed point in the past.
+Converts a date or date with time to the number of the second, starting from a certain fixed point in the past.

 ## toISOYear
@@ -517,6 +517,154 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
 └────────────┴───────────┴───────────┴───────────┘
 ```

+## age
+
+Returns the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated using a precision of 1 second.
+E.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for `day` unit, 0 months for `month` unit, 0 years for `year` unit.
+
+**Syntax**
+
+``` sql
+age('unit', startdate, enddate, [timezone])
+```
+
+**Arguments**
+
+- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
+    Possible values:
+
+    - `second` (possible abbreviations: `ss`, `s`)
+    - `minute` (possible abbreviations: `mi`, `n`)
+    - `hour` (possible abbreviations: `hh`, `h`)
+    - `day` (possible abbreviations: `dd`, `d`)
+    - `week` (possible abbreviations: `wk`, `ww`)
+    - `month` (possible abbreviations: `mm`, `m`)
+    - `quarter` (possible abbreviations: `qq`, `q`)
+    - `year` (possible abbreviations: `yyyy`, `yy`)
+
+- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+Difference between `enddate` and `startdate` expressed in `unit`.
+
+Type: [Int](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
+```
+
+Result:
+
+``` text
+┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
+│ 24 │
+└───────────────────────────────────────────────────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT
+    toDate('2022-01-01') AS e,
+    toDate('2021-12-29') AS s,
+    age('day', s, e) AS day_age,
+    age('month', s, e) AS month__age,
+    age('year', s, e) AS year_age;
+```
+
+Result:
+
+``` text
+┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
+│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
+└────────────┴────────────┴─────────┴────────────┴──────────┘
+```
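The contrast with `date_diff` below is easiest to see on one pair of dates: `age` counts only fully elapsed units, while `date_diff` counts unit boundaries crossed. A minimal side-by-side query, using only values taken from the examples in these two sections:

``` sql
SELECT
    dateDiff('month', toDate('2021-12-29'), toDate('2022-01-01')) AS boundaries_crossed,  -- returns 1
    age('month', toDate('2021-12-29'), toDate('2022-01-01')) AS full_months_elapsed;      -- returns 0
```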
+## date\_diff
+
+Returns the count of the specified `unit` boundaries crossed between the `startdate` and `enddate`.
+The difference is calculated using relative units, e.g. the difference between `2021-12-29` and `2022-01-01` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
+
+**Syntax**
+
+``` sql
+date_diff('unit', startdate, enddate, [timezone])
+```
+
+Aliases: `dateDiff`, `DATE_DIFF`.
+
+**Arguments**
+
+- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
+    Possible values:
+
+    - `second` (possible abbreviations: `ss`, `s`)
+    - `minute` (possible abbreviations: `mi`, `n`)
+    - `hour` (possible abbreviations: `hh`, `h`)
+    - `day` (possible abbreviations: `dd`, `d`)
+    - `week` (possible abbreviations: `wk`, `ww`)
+    - `month` (possible abbreviations: `mm`, `m`)
+    - `quarter` (possible abbreviations: `qq`, `q`)
+    - `year` (possible abbreviations: `yyyy`, `yy`)
+
+- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+Difference between `enddate` and `startdate` expressed in `unit`.
+
+Type: [Int](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
+```
+
+Result:
+
+``` text
+┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
+│ 25 │
+└────────────────────────────────────────────────────────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT
+    toDate('2022-01-01') AS e,
+    toDate('2021-12-29') AS s,
+    dateDiff('day', s, e) AS day_diff,
+    dateDiff('month', s, e) AS month__diff,
+    dateDiff('year', s, e) AS year_diff;
+```
+
+Result:
+
+``` text
+┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
+│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
+└────────────┴────────────┴──────────┴─────────────┴───────────┘
+```

 ## date\_trunc

 Truncates date and time data to the specified part of date.
@@ -637,80 +785,6 @@ Result:
 └───────────────────────────────────────────────┘
 ```

-## date\_diff
-
-Returns the difference between two dates or dates with time values.
-The difference is calculated using relative units, e.g. the difference between `2022-01-01` and `2021-12-29` is 3 days for day unit (see [toRelativeDayNum](#torelativedaynum)), 1 month for month unit (see [toRelativeMonthNum](#torelativemonthnum)), 1 year for year unit (see [toRelativeYearNum](#torelativeyearnum)).
-
-**Syntax**
-
-``` sql
-date_diff('unit', startdate, enddate, [timezone])
-```
-
-Aliases: `dateDiff`, `DATE_DIFF`.
-
-**Arguments**
-
-- `unit` — The type of interval for result. [String](../../sql-reference/data-types/string.md).
-    Possible values:
-
-    - `second`
-    - `minute`
-    - `hour`
-    - `day`
-    - `week`
-    - `month`
-    - `quarter`
-    - `year`
-
-- `startdate` — The first time value to subtract (the subtrahend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
-
-- `enddate` — The second time value to subtract from (the minuend). [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
-
-- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, timezones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
-
-**Returned value**
-
-Difference between `enddate` and `startdate` expressed in `unit`.
-
-Type: [Int](../../sql-reference/data-types/int-uint.md).
-
-**Example**
-
-Query:
-
-``` sql
-SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
-```
-
-Result:
-
-``` text
-┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
-│ 25 │
-└────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
-Query:
-
-``` sql
-SELECT
-    toDate('2022-01-01') AS e,
-    toDate('2021-12-29') AS s,
-    dateDiff('day', s, e) AS day_diff,
-    dateDiff('month', s, e) AS month__diff,
-    dateDiff('year', s, e) AS year_diff;
-```
-
-Result:
-
-``` text
-┌──────────e─┬──────────s─┬─day_diff─┬─month__diff─┬─year_diff─┐
-│ 2022-01-01 │ 2021-12-29 │ 3 │ 1 │ 1 │
-└────────────┴────────────┴──────────┴─────────────┴───────────┘
-```
-
 ## date\_sub

 Subtracts the time interval or date interval from the provided date or date with time.


@@ -169,12 +169,6 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
 cmake -D CMAKE_BUILD_TYPE=Debug ..

-If you are using an old HDD or SSD on your development machine, or if you want to use less disk space for build artifacts, you can use the following command:
-```bash
-cmake -DUSE_DEBUG_HELPERS=1 -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 ..
-```
-Keep in mind that the resulting executables will be dynamically linked against libraries and therefore effectively non-portable to other machines (or will require considerably more effort to move compared to a static build). The upside is a significantly shorter build time (not on the first build, but on subsequent builds after source changes, since less time is spent on linking compared to a static build) and significantly lower disk usage (more than a 3x saving compared to a static build). For development, when only debug runs on the build machine itself are planned, this can be the most convenient option.
-
 You can change the build variant by running a new command in the build directory.

 Run ninja to build:


@@ -424,23 +424,23 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d

 ## toRelativeYearNum {#torelativeyearnum}

-Converts a date with time or a date to the number of the year, starting from a certain fixed point in the past.
+Converts a date or a date with time to the number of the year, starting from a certain fixed point in the past.

 ## toRelativeQuarterNum {#torelativequarternum}

-Converts a date with time or a date to the number of the quarter, starting from a certain fixed point in the past.
+Converts a date or a date with time to the number of the quarter, starting from a certain fixed point in the past.

 ## toRelativeMonthNum {#torelativemonthnum}

-Converts a date with time or a date to the number of the month, starting from a certain fixed point in the past.
+Converts a date or a date with time to the number of the month, starting from a certain fixed point in the past.

 ## toRelativeWeekNum {#torelativeweeknum}

-Converts a date with time or a date to the number of the week, starting from a certain fixed point in the past.
+Converts a date or a date with time to the number of the week, starting from a certain fixed point in the past.

 ## toRelativeDayNum {#torelativedaynum}

-Converts a date with time or a date to the number of the day, starting from a certain fixed point in the past.
+Converts a date or a date with time to the number of the day, starting from a certain fixed point in the past.

 ## toRelativeHourNum {#torelativehournum}
@@ -456,7 +456,7 @@ WITH toDateTime64('2020-01-01 10:20:30.999', 3) AS dt64 SELECT toStartOfSecond(d

 ## toISOYear {#toisoyear}

-Converts a date with time or a date to a UInt16 number containing the ISO year number. The ISO year differs from the ordinary year because, per [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601), the ISO year does not necessarily start on January 1.
+Converts a date or a date with time to a UInt16 number containing the ISO year number. The ISO year differs from the ordinary year because, per [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601), the ISO year does not necessarily start on January 1.

 **Example**
@@ -479,7 +479,7 @@ SELECT

 ## toISOWeek {#toisoweek}

-Converts a date with time or a date to a UInt8 number containing the ISO week number.
+Converts a date or a date with time to a UInt8 number containing the ISO week number.
 The start of the ISO year differs from the start of the ordinary year because, per [ISO 8601:1988](https://en.wikipedia.org/wiki/ISO_8601), the first week of the year is the week with four or more days in that year.
 January 1, 2017 is a Sunday, i.e. the first ISO week of 2017 started on Monday, January 2, so January 1, 2017 belongs to the last week of 2016.
@@ -503,7 +503,7 @@ SELECT
 ```

 ## toWeek(date\[, mode\]\[, timezone\]) {#toweek}

-Converts a date with time or a date to a UInt8 number containing the week number. The second argument, mode, selects whether the week starts on Sunday or Monday and whether the returned value is in the range 0 to 53 or 1 to 53. If the mode argument is omitted, mode 0 is used.
+Converts a date or a date with time to a UInt8 number containing the week number. The second argument, mode, selects whether the week starts on Sunday or Monday and whether the returned value is in the range 0 to 53 or 1 to 53. If the mode argument is omitted, mode 0 is used.

 `toISOWeek()` is equivalent to `toWeek(date,3)`.
@@ -569,6 +569,132 @@ SELECT toDate('2016-12-27') AS date, toYearWeek(date) AS yearWeek0, toYearWeek(d
 └────────────┴───────────┴───────────┴───────────┘
 ```

+## age
+
+Computes the `unit` component of the difference between `startdate` and `enddate`. The difference is calculated with a precision of 1 second.
+For example, the difference between `2021-12-29` and `2022-01-01` is 3 days for the `day` unit, 0 months for the `month` unit, 0 years for the `year` unit.
+
+**Syntax**
+
+``` sql
+age('unit', startdate, enddate, [timezone])
+```
+
+**Arguments**
+
+- `unit` — the unit of time in which the return value of the function is expressed. [String](../../sql-reference/data-types/string.md).
+    Possible values:
+
+    - `second` (possible abbreviations: `ss`, `s`)
+    - `minute` (possible abbreviations: `mi`, `n`)
+    - `hour` (possible abbreviations: `hh`, `h`)
+    - `day` (possible abbreviations: `dd`, `d`)
+    - `week` (possible abbreviations: `wk`, `ww`)
+    - `month` (possible abbreviations: `mm`, `m`)
+    - `quarter` (possible abbreviations: `qq`, `q`)
+    - `year` (possible abbreviations: `yyyy`, `yy`)
+
+- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+The difference between `enddate` and `startdate`, expressed in `unit`.
+
+Type: [Int](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'));
+```
+
+Result:
+
+``` text
+┌─age('hour', toDateTime('2018-01-01 22:30:00'), toDateTime('2018-01-02 23:00:00'))─┐
+│ 24 │
+└───────────────────────────────────────────────────────────────────────────────────┘
+```
+
+Query:
+
+``` sql
+SELECT
+    toDate('2022-01-01') AS e,
+    toDate('2021-12-29') AS s,
+    age('day', s, e) AS day_age,
+    age('month', s, e) AS month__age,
+    age('year', s, e) AS year_age;
+```
+
+Result:
+
+``` text
+┌──────────e─┬──────────s─┬─day_age─┬─month__age─┬─year_age─┐
+│ 2022-01-01 │ 2021-12-29 │ 3 │ 0 │ 0 │
+└────────────┴────────────┴─────────┴────────────┴──────────┘
+```
+
+## date\_diff {#date_diff}
+
+Computes the number of the specified `unit` boundaries crossed between `startdate` and `enddate`.
+
+**Syntax**
+
+``` sql
+date_diff('unit', startdate, enddate, [timezone])
+```
+
+Aliases: `dateDiff`, `DATE_DIFF`.
+
+**Arguments**
+
+- `unit` — the unit of time in which the return value of the function is expressed. [String](../../sql-reference/data-types/string.md).
+    Possible values:
+
+    - `second` (possible abbreviations: `ss`, `s`)
+    - `minute` (possible abbreviations: `mi`, `n`)
+    - `hour` (possible abbreviations: `hh`, `h`)
+    - `day` (possible abbreviations: `dd`, `d`)
+    - `week` (possible abbreviations: `wk`, `ww`)
+    - `month` (possible abbreviations: `mm`, `m`)
+    - `quarter` (possible abbreviations: `qq`, `q`)
+    - `year` (possible abbreviations: `yyyy`, `yy`)
+
+- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
+
+- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
+
+**Returned value**
+
+The difference between `enddate` and `startdate`, expressed in `unit`.
+
+Type: [Int](../../sql-reference/data-types/int-uint.md).
+
+**Example**
+
+Query:
+
+``` sql
+SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
+```
+
+Result:
+
+``` text
+┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
+│ 25 │
+└────────────────────────────────────────────────────────────────────────────────────────┘
+```

 ## date_trunc {#date_trunc}

 Truncates date and time parts smaller than the specified part.
@@ -689,60 +815,6 @@ SELECT date_add(YEAR, 3, toDate('2018-01-01'));
 └───────────────────────────────────────────────┘
 ```

-## date\_diff {#date_diff}
-
-Computes the difference between two dates or dates with time.
-
-**Syntax**
-
-``` sql
-date_diff('unit', startdate, enddate, [timezone])
-```
-
-Aliases: `dateDiff`, `DATE_DIFF`.
-
-**Arguments**
-
-- `unit` — the unit of time in which the return value of the function is expressed. [String](../../sql-reference/data-types/string.md).
-    Possible values:
-
-    - `second`
-    - `minute`
-    - `hour`
-    - `day`
-    - `week`
-    - `month`
-    - `quarter`
-    - `year`
-
-- `startdate` — the first date or date with time, which is subtracted from `enddate`. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
-
-- `enddate` — the second date or date with time, from which `startdate` is subtracted. [Date](../../sql-reference/data-types/date.md), [Date32](../../sql-reference/data-types/date32.md), [DateTime](../../sql-reference/data-types/datetime.md) or [DateTime64](../../sql-reference/data-types/datetime64.md).
-
-- `timezone` — [time zone](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional). If specified, it is applied to both `startdate` and `enddate`. If not specified, the time zones of `startdate` and `enddate` are used. If they are not the same, the result is unspecified. [String](../../sql-reference/data-types/string.md).
-
-**Returned value**
-
-The difference between `enddate` and `startdate`, expressed in `unit`.
-
-Type: [Int](../../sql-reference/data-types/int-uint.md).
-
-**Example**
-
-Query:
-
-``` sql
-SELECT dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'));
-```
-
-Result:
-
-``` text
-┌─dateDiff('hour', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00'))─┐
-│ 25 │
-└────────────────────────────────────────────────────────────────────────────────────────┘
-```
-
 ## date\_sub {#date_sub}

 Subtracts a time or date interval from the specified date or date with time.


@@ -85,7 +85,6 @@ git push
 - **Build type**: `Debug` or `RelWithDebInfo` (cmake).
 - **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
 - **Bundled**: a `bundled` build uses libraries from `contrib`, while an `unbundled` build uses system libraries.
-- **Splitted**: `splitted` is a [split build](https://clickhouse.com/docs/en/development/build/#split-build)
 - **Status**: `success` or `fail`.
 - **Build log**: link to the build and file copying log, useful when the build failed.
 - **Build time**.
@@ -97,7 +96,6 @@ git push
 - `clickhouse`: Main built binary.
 - `clickhouse-odbc-bridge`
 - `unit_tests_dbms`: GoogleTest binary with ClickHouse unit tests.
-- `shared_build.tgz`: build with shared libraries.
 - `performance.tgz`: Special package for performance tests.

 ## Special Build Check {#special-buildcheck}
@@ -123,14 +121,6 @@ git push
   of error.
 ```

-## Smoke Test {#split-build-smoke-test}
-
-Checks that the server built in the [split build](./build.md#split-build) configuration can start and run simple queries. If it fails:
-
-```
-* Fix other test errors first;
-* Build the server in [split build](./build.md#split-build) configuration
-  locally and check whether it can start and run `select 1`.
-```
-
 ## Compatibility Check {#compatibility-check}

 Checks that the `clickhouse` binary can run on distributions with old libc versions. If it fails, ask a maintainer for help.


@@ -13,12 +13,6 @@ option (ENABLE_CLICKHOUSE_SERVER "Server mode (main mode)" ${ENABLE_CLICKHOUSE_A
 option (ENABLE_CLICKHOUSE_CLIENT "Client mode (interactive tui/shell that connects to the server)"
     ${ENABLE_CLICKHOUSE_ALL})

-# Don't create self-extracting clickhouse for split build
-if (ENABLE_CLICKHOUSE_SELF_EXTRACTING AND SPLIT_SHARED_LIBRARIES)
-    message (STATUS "Self-extracting on split build is not supported")
-    unset (ENABLE_CLICKHOUSE_SELF_EXTRACTING CACHE)
-endif ()
-
 # https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/
 option (ENABLE_CLICKHOUSE_LOCAL "Local files fast processing mode" ${ENABLE_CLICKHOUSE_ALL})
@@ -173,10 +167,6 @@ else()
     message(STATUS "ClickHouse keeper-converter mode: OFF")
 endif()

-if(NOT (USE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES))
-    set(CLICKHOUSE_ONE_SHARED ON)
-endif()
-
 if (ENABLE_CLICKHOUSE_DISKS)
     message(STATUS "Clickhouse disks mode: ON")
 else()
@@ -192,11 +182,7 @@ endif()
 configure_file (config_tools.h.in ${CONFIG_INCLUDE_PATH}/config_tools.h)

 macro(clickhouse_target_link_split_lib target name)
-    if(NOT CLICKHOUSE_ONE_SHARED)
-        target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
-    else()
-        target_link_libraries(${target} PRIVATE clickhouse-lib)
-    endif()
+    target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
 endmacro()

 macro(clickhouse_program_add_library name)
@@ -208,18 +194,16 @@ macro(clickhouse_program_add_library name)
     set(CLICKHOUSE_${name_uc}_LINK ${CLICKHOUSE_${name_uc}_LINK} PARENT_SCOPE)
     set(CLICKHOUSE_${name_uc}_INCLUDE ${CLICKHOUSE_${name_uc}_INCLUDE} PARENT_SCOPE)

-    if(NOT CLICKHOUSE_ONE_SHARED)
-        add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES})
+    add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES})

-        set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if()
-        if(_link)
-            target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK})
-        endif()
+    set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if()
+    if(_link)
+        target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK})
+    endif()

-        set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if()
-        if (_include)
-            target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE})
-        endif()
-    endif()
+    set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if()
+    if (_include)
+        target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE})
+    endif()
 endmacro()
@@ -263,68 +247,8 @@ if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
     add_subdirectory (self-extracting)
 endif ()

-if (CLICKHOUSE_ONE_SHARED)
-    add_library(clickhouse-lib SHARED
-        ${CLICKHOUSE_SERVER_SOURCES}
-        ${CLICKHOUSE_CLIENT_SOURCES}
-        ${CLICKHOUSE_LOCAL_SOURCES}
-        ${CLICKHOUSE_BENCHMARK_SOURCES}
-        ${CLICKHOUSE_COPIER_SOURCES}
-        ${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES}
-        ${CLICKHOUSE_COMPRESSOR_SOURCES}
-        ${CLICKHOUSE_FORMAT_SOURCES}
-        ${CLICKHOUSE_OBFUSCATOR_SOURCES}
-        ${CLICKHOUSE_GIT_IMPORT_SOURCES}
-        ${CLICKHOUSE_ODBC_BRIDGE_SOURCES}
-        ${CLICKHOUSE_KEEPER_SOURCES}
-        ${CLICKHOUSE_KEEPER_CONVERTER_SOURCES}
-        ${CLICKHOUSE_STATIC_FILES_DISK_UPLOADER_SOURCES}
-        ${CLICKHOUSE_SU_SOURCES})
-
-    target_link_libraries(clickhouse-lib
-        ${CLICKHOUSE_SERVER_LINK}
-        ${CLICKHOUSE_CLIENT_LINK}
-        ${CLICKHOUSE_LOCAL_LINK}
-        ${CLICKHOUSE_BENCHMARK_LINK}
-        ${CLICKHOUSE_COPIER_LINK}
-        ${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK}
-        ${CLICKHOUSE_COMPRESSOR_LINK}
-        ${CLICKHOUSE_FORMAT_LINK}
-        ${CLICKHOUSE_OBFUSCATOR_LINK}
-        ${CLICKHOUSE_GIT_IMPORT_LINK}
-        ${CLICKHOUSE_ODBC_BRIDGE_LINK}
-        ${CLICKHOUSE_KEEPER_LINK}
-        ${CLICKHOUSE_KEEPER_CONVERTER_LINK}
-        ${CLICKHOUSE_STATIC_FILES_DISK_UPLOADER_LINK}
-        ${CLICKHOUSE_SU_LINK})
-
-    target_include_directories(clickhouse-lib
-        ${CLICKHOUSE_SERVER_INCLUDE}
-        ${CLICKHOUSE_CLIENT_INCLUDE}
-        ${CLICKHOUSE_LOCAL_INCLUDE}
-        ${CLICKHOUSE_BENCHMARK_INCLUDE}
-        ${CLICKHOUSE_COPIER_INCLUDE}
-        ${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE}
-        ${CLICKHOUSE_COMPRESSOR_INCLUDE}
-        ${CLICKHOUSE_FORMAT_INCLUDE}
-        ${CLICKHOUSE_OBFUSCATOR_INCLUDE}
-        ${CLICKHOUSE_GIT_IMPORT_INCLUDE}
-        ${CLICKHOUSE_ODBC_BRIDGE_INCLUDE}
-        ${CLICKHOUSE_KEEPER_INCLUDE}
-        ${CLICKHOUSE_KEEPER_CONVERTER_INCLUDE})
-
-    set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "")
-    install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
-endif()
-
 clickhouse_add_executable (clickhouse main.cpp)

-if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
-    # Shared split (dev) build: In CI, the server is run with custom LD_LIBRARY_PATH. This makes the harmful env check re-execute the
-    # process in a clean environment but as in CI the containing directory is not included in DT_RUNPATH/DT_RPATH, the server won't come up.
-    target_compile_definitions(clickhouse PRIVATE DISABLE_HARMFUL_ENV_VAR_CHECK)
-endif ()
-
 # A library that prevent usage of several functions from libc.
 if (ARCH_AMD64 AND OS_LINUX AND NOT OS_ANDROID)
     set (HARMFUL_LIB harmful)


@@ -10,6 +10,4 @@ set (CLICKHOUSE_BENCHMARK_LINK

 clickhouse_program_add(benchmark)

-if(NOT CLICKHOUSE_ONE_SHARED)
-    target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib)
-endif()
+target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib)


@@ -1,12 +0,0 @@
-#!/bin/sh
-
-# Helper for split build mode.
-# Allows to run commands like
-# clickhouse client
-# clickhouse server
-# ...
-
-set -e
-CMD=$1
-shift
-clickhouse-$CMD $*


@@ -14,9 +14,7 @@ set (CLICKHOUSE_LOCAL_LINK

 clickhouse_program_add(local)

-if(NOT CLICKHOUSE_ONE_SHARED)
-    target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
-endif()
+target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)

 if (TARGET ch_rust::skim)
     target_link_libraries(clickhouse-local-lib PRIVATE ch_rust::skim)


@@ -345,7 +345,7 @@ struct Checker
     ;

-#if !defined(DISABLE_HARMFUL_ENV_VAR_CHECK) && !defined(USE_MUSL)
+#if !defined(USE_MUSL)
 /// NOTE: We will migrate to full static linking or our own dynamic loader to make this code obsolete.
 void checkHarmfulEnvironmentVariables(char ** argv)
 {
@@ -457,7 +457,7 @@ int main(int argc_, char ** argv_)
     /// Note: we forbid dlopen in our code.
     updatePHDRCache();

-#if !defined(DISABLE_HARMFUL_ENV_VAR_CHECK) && !defined(USE_MUSL)
+#if !defined(USE_MUSL)
     checkHarmfulEnvironmentVariables(argv_);
 #endif


@@ -420,6 +420,33 @@ void Server::createServer(
     }
 }

+#if defined(OS_LINUX)
+namespace
+{
+
+void setOOMScore(int value, Poco::Logger * log)
+{
+    try
+    {
+        std::string value_string = std::to_string(value);
+        DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
+        buf.write(value_string.c_str(), value_string.size());
+        buf.next();
+        buf.close();
+    }
+    catch (const Poco::Exception & e)
+    {
+        LOG_WARNING(log, "Failed to adjust OOM score: '{}'.", e.displayText());
+        return;
+    }
+    LOG_INFO(log, "Set OOM score adjustment to {}", value);
+}
+
+}
+#endif
+
 void Server::uninitialize()
 {
     logger().information("shutting down");
@@ -881,6 +908,21 @@ try
             }
         }
     }
+
+    int default_oom_score = 0;
+
+#if !defined(NDEBUG)
+    /// In debug version on Linux, increase oom score so that clickhouse is killed
+    /// first, instead of some service. Use a carefully chosen random score of 555:
+    /// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore
+    /// whatever errors that occur, because it's just a debugging aid and we don't
+    /// care if it breaks.
+    default_oom_score = 555;
+#endif
+
+    int oom_score = config().getInt("oom_score", default_oom_score);
+    if (oom_score)
+        setOOMScore(oom_score, log);
 #endif

     global_context->setRemoteHostFilter(config());


@@ -1464,4 +1464,8 @@
          I don't recommend to change this setting.
     <show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
     -->
+
+    <!-- On Linux systems this can control the behavior of OOM killer.
+    <oom_score>-1000</oom_score>
+    -->
 </clickhouse>


@@ -35,15 +35,7 @@ add_custom_command(OUTPUT ${ffi_binding_final_path}
     DEPENDS cargo-build__ch_rust_skim_rust)

 add_library(_ch_rust_skim_ffi ${ffi_binding_final_path})
-if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
-    # static
-else()
-    if (OS_DARWIN)
-        target_link_libraries(_ch_rust_skim_ffi PRIVATE -Wl,-undefined,dynamic_lookup)
-    else()
-        target_link_libraries(_ch_rust_skim_ffi PRIVATE -Wl,--unresolved-symbols=ignore-all)
-    endif()
-endif()

 # cxx bridge compiles such bindings
 set_target_properties(_ch_rust_skim_ffi PROPERTIES COMPILE_FLAGS "${CXXBRIDGE_CXXFLAGS}")


@@ -11,11 +11,7 @@ if(COMPILER_PIPE)
 else()
     set(MAX_COMPILER_MEMORY 1500)
 endif()
-if(USE_STATIC_LIBRARIES)
-    set(MAX_LINKER_MEMORY 3500)
-else()
-    set(MAX_LINKER_MEMORY 2500)
-endif()
+set(MAX_LINKER_MEMORY 3500)
 include(../cmake/limit_jobs.cmake)

 include (../cmake/version.cmake)
@@ -200,10 +196,6 @@ endif ()

 add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources})

-if (SPLIT_SHARED_LIBRARIES)
-    target_compile_definitions(clickhouse_common_io PRIVATE SPLIT_SHARED_LIBRARIES)
-endif ()
-
 add_library (clickhouse_malloc OBJECT Common/malloc.cpp)
 set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin")
@@ -227,18 +219,7 @@ add_subdirectory(Common/Config)
 set (all_modules)
 macro(add_object_library name common_path)
-    if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
-        add_headers_and_sources(dbms ${common_path})
-    else ()
-        list (APPEND all_modules ${name})
-        add_headers_and_sources(${name} ${common_path})
-        add_library(${name} SHARED ${${name}_sources} ${${name}_headers})
-        if (OS_DARWIN)
-            target_link_libraries (${name} PRIVATE -Wl,-undefined,dynamic_lookup)
-        else()
-            target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all)
-        endif()
-    endif ()
+    add_headers_and_sources(dbms ${common_path})
 endmacro()

 add_object_library(clickhouse_access Access)
@@ -297,28 +278,12 @@ if (TARGET ch_contrib::nuraft)
     add_object_library(clickhouse_coordination Coordination)
 endif()

-if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
-    add_library (dbms STATIC ${dbms_headers} ${dbms_sources})
-    target_link_libraries (dbms PRIVATE ch_contrib::libdivide)
-    if (TARGET ch_contrib::jemalloc)
-        target_link_libraries (dbms PRIVATE ch_contrib::jemalloc)
-    endif()
-    set (all_modules dbms)
-else()
-    add_library (dbms SHARED ${dbms_headers} ${dbms_sources})
-    target_link_libraries (dbms PUBLIC ${all_modules})
-    target_link_libraries (clickhouse_interpreters PRIVATE ch_contrib::libdivide)
-    if (TARGET ch_contrib::jemalloc)
-        target_link_libraries (clickhouse_interpreters PRIVATE ch_contrib::jemalloc)
-    endif()
-    list (APPEND all_modules dbms)
-    # force all split libs to be linked
-    if (OS_DARWIN)
-        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-undefined,error")
-    else()
-        set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed")
-    endif()
-endif ()
+add_library (dbms STATIC ${dbms_headers} ${dbms_sources})
+target_link_libraries (dbms PRIVATE ch_contrib::libdivide)
+if (TARGET ch_contrib::jemalloc)
+    target_link_libraries (dbms PRIVATE ch_contrib::jemalloc)
+endif()
+set (all_modules dbms)

 macro (dbms_target_include_directories)
     foreach (module ${all_modules})

@@ -109,8 +109,7 @@ public:
     template <typename... Args>
     [[nodiscard]] bool emplace(Args &&... args)
     {
-        emplaceImpl(std::nullopt /* timeout in milliseconds */, std::forward<Args...>(args...));
-        return true;
+        return emplaceImpl(std::nullopt /* timeout in milliseconds */, std::forward<Args...>(args...));
     }

     /// Returns false if queue is finished and empty

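The emplace() fix above is easy to miss: the old body discarded the result of emplaceImpl() and reported success even when the queue was already finished. A toy model of why that matters (names mirror the diff, but this is a stand-in, not ClickHouse's ConcurrentBoundedQueue):

#include <iostream>
#include <optional>

struct ToyQueue
{
    bool finished = false;

    bool emplaceImpl(std::optional<int> /*timeout_ms*/, int /*value*/)
    {
        return !finished; // a finished queue rejects new elements
    }

    bool emplaceBuggy(int value)
    {
        emplaceImpl(std::nullopt, value);
        return true; // always "succeeds", even after the queue is finished
    }

    bool emplaceFixed(int value)
    {
        return emplaceImpl(std::nullopt, value); // propagate the real outcome
    }
};

int main()
{
    ToyQueue queue;
    queue.finished = true;
    std::cout << queue.emplaceBuggy(1) << '\n'; // 1: silently lies to the caller
    std::cout << queue.emplaceFixed(1) << '\n'; // 0: caller can react to the failure
}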

@@ -1204,6 +1204,11 @@ public:
         return res;
     }

+    template <typename DateOrTime>
+    inline DateTimeComponents toDateTimeComponents(DateOrTime v) const
+    {
+        return toDateTimeComponents(lut[toLUTIndex(v)].date);
+    }
+
     inline UInt64 toNumYYYYMMDDhhmmss(Time t) const
     {


@@ -48,6 +48,30 @@ struct ClearableHashTableCell : public BaseCell
     ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
 };

+using StringRefBaseCell = HashSetCellWithSavedHash<StringRef, DefaultHash<StringRef>, ClearableHashSetState>;
+
+/// specialization for StringRef to allow zero size key (empty string)
+template <>
+struct ClearableHashTableCell<StringRef, StringRefBaseCell> : public StringRefBaseCell
+{
+    using State = ClearableHashSetState;
+    using value_type = typename StringRefBaseCell::value_type;
+
+    UInt32 version;
+
+    bool isZero(const State & state) const { return version != state.version; }
+    static bool isZero(const StringRef & key_, const State & state_) { return StringRefBaseCell::isZero(key_, state_); }
+
+    /// Set the key value to zero.
+    void setZero() { version = 0; }
+
+    /// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
+    static constexpr bool need_zero_value_storage = true;
+
+    ClearableHashTableCell() { } //-V730 /// NOLINT
+    ClearableHashTableCell(const StringRef & key_, const State & state) : StringRefBaseCell(key_, state), version(state.version) { }
+};
+
 template <
     typename Key,
     typename Hash = DefaultHash<Key>,

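For context, the `version` field in these cells is what makes the hash table "clearable": clear() just bumps a shared version counter, and any cell whose stored version lags behind is treated as empty. The StringRef specialization above is needed because the empty string is a legitimate key and can no longer double as the "zero" sentinel. A self-contained sketch of the versioning idea (illustrative only, not ClickHouse's hash table):

#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// A "clearable" map that is emptied in O(1) by bumping a version counter.
// An entry counts as absent when its version differs from the current one.
template <typename Key, typename Value>
class ClearableMap
{
    struct Cell { uint32_t version = 0; Value value{}; };
    std::unordered_map<Key, Cell> cells;
    uint32_t current_version = 1;

public:
    void set(const Key & key, const Value & value)
    {
        cells[key] = Cell{current_version, value};
    }

    const Value * get(const Key & key) const
    {
        auto it = cells.find(key);
        if (it == cells.end() || it->second.version != current_version)
            return nullptr; // stale entry from before the last clear()
        return &it->second.value;
    }

    void clear() { ++current_version; } // O(1): all old entries become stale
};

int main()
{
    ClearableMap<std::string, int> map;
    map.set("", 42); // the empty string is a perfectly valid key here
    std::cout << (map.get("") != nullptr) << '\n'; // 1
    map.clear();
    std::cout << (map.get("") != nullptr) << '\n'; // 0
}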

@@ -31,7 +31,7 @@ private:
     void init(time_t time, const DateLUTImpl & time_zone)
     {
-        DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(time);
+        DateLUTImpl::DateTimeComponents components = time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(time));

         m_year = components.date.year;
         m_month = components.date.month;


@@ -352,7 +352,8 @@ time_t getModificationTime(const std::string & path)
     struct stat st;
     if (stat(path.c_str(), &st) == 0)
         return st.st_mtime;
-    DB::throwFromErrnoWithPath("Cannot check modification time for file: " + path, path, DB::ErrorCodes::CANNOT_STAT);
+    std::error_code m_ec(errno, std::generic_category());
+    throw fs::filesystem_error("Cannot check modification time for file", path, m_ec);
 }

 time_t getChangeTime(const std::string & path)
@@ -360,7 +361,8 @@ time_t getChangeTime(const std::string & path)
     struct stat st;
     if (stat(path.c_str(), &st) == 0)
         return st.st_ctime;
-    DB::throwFromErrnoWithPath("Cannot check change time for file: " + path, path, DB::ErrorCodes::CANNOT_STAT);
+    std::error_code m_ec(errno, std::generic_category());
+    throw fs::filesystem_error("Cannot check change time for file", path, m_ec);
 }

 Poco::Timestamp getModificationTimestamp(const std::string & path)

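Replacing throwFromErrnoWithPath with fs::filesystem_error lets callers branch on portable std::errc values instead of ClickHouse-specific error codes and message parsing. A minimal sketch of the pattern (the free-standing function name is illustrative):

#include <cerrno>
#include <ctime>
#include <filesystem>
#include <iostream>
#include <string>
#include <sys/stat.h>

namespace fs = std::filesystem;

// Wrap errno into a std::error_code so callers can match on std::errc.
time_t getModificationTimeOrThrow(const std::string & path)
{
    struct stat st;
    if (stat(path.c_str(), &st) == 0)
        return st.st_mtime;
    std::error_code ec(errno, std::generic_category());
    throw fs::filesystem_error("Cannot check modification time for file", path, ec);
}

int main()
{
    try
    {
        getModificationTimeOrThrow("/no/such/file");
    }
    catch (const fs::filesystem_error & e)
    {
        // The caller can branch on the error condition portably.
        if (e.code() == std::errc::no_such_file_or_directory)
            std::cout << "missing file, treat as mtime 0\n";
        else
            throw;
    }
}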

@@ -1,6 +1,6 @@
 #include "remapExecutable.h"

-#if defined(OS_LINUX) && defined(__amd64__) && defined(__SSE2__) && !defined(SANITIZER) && defined(NDEBUG) && !defined(SPLIT_SHARED_LIBRARIES)
+#if defined(OS_LINUX) && defined(__amd64__) && defined(__SSE2__) && !defined(SANITIZER) && defined(NDEBUG)

 #include <sys/mman.h>
 #include <unistd.h>


@@ -1203,7 +1203,7 @@ auto DDperformanceTestSequence()
         + generateSeq<ValueType>(G(SameValueGenerator(42)), 0, times); // best
 }

-// prime numbers in ascending order with some random repitions hit all the cases of Gorilla.
+// prime numbers in ascending order with some random repetitions hit all the cases of Gorilla.
 auto PrimesWithMultiplierGenerator = [](int multiplier = 1)
 {
     return [multiplier](auto i)


@@ -296,7 +296,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
     return true;
 }

-void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async)
+void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async, const MultiVersion<Macros>::Version & macros)
 {
     LOG_DEBUG(log, "Initializing storage dispatcher");
@@ -307,7 +307,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
     responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
     snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });

-    snapshot_s3.startup(config);
+    snapshot_s3.startup(config, macros);

     server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue, snapshot_s3);
@@ -687,7 +687,7 @@ bool KeeperDispatcher::isServerActive() const
     return checkInit() && hasLeader() && !server->isRecovering();
 }

-void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config)
+void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
 {
     auto diff = server->getConfigurationDiff(config);
     if (diff.empty())
@@ -704,7 +704,7 @@ void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfigurati
         throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue");
     }

-    snapshot_s3.updateS3Configuration(config);
+    snapshot_s3.updateS3Configuration(config, macros);
 }

 void KeeperDispatcher::updateKeeperStatLatency(uint64_t process_time_ms)


@@ -15,6 +15,8 @@
 #include <Coordination/Keeper4LWInfo.h>
 #include <Coordination/KeeperConnectionStats.h>
 #include <Coordination/KeeperSnapshotManagerS3.h>
+#include <Common/MultiVersion.h>
+#include <Common/Macros.h>

 namespace DB
 {
@@ -109,7 +111,8 @@ public:
     /// Initialization from config.
     /// standalone_keeper -- we are standalone keeper application (not inside clickhouse server)
-    void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async);
+    /// 'macros' are used to substitute macros in endpoint of disks
+    void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async, const MultiVersion<Macros>::Version & macros);

     void startServer();
@@ -124,7 +127,8 @@ public:
     /// Registered in ConfigReloader callback. Add new configuration changes to
     /// update_configuration_queue. Keeper Dispatcher apply them asynchronously.
-    void updateConfiguration(const Poco::Util::AbstractConfiguration & config);
+    /// 'macros' are used to substitute macros in endpoint of disks
+    void updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);

     /// Shutdown internal keeper parts (server, state machine, log storage, etc)
     void shutdown();


@@ -14,6 +14,7 @@
 #include <IO/S3/PocoHTTPClient.h>
 #include <IO/WriteHelpers.h>
 #include <IO/copyData.h>
+#include <Common/Macros.h>

 #include <aws/core/auth/AWSCredentials.h>
 #include <aws/s3/S3Client.h>
@@ -47,7 +48,7 @@ KeeperSnapshotManagerS3::KeeperSnapshotManagerS3()
     , uuid(UUIDHelpers::generateV4())
 {}

-void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config)
+void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
 {
     try
     {
@@ -64,7 +65,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
         auto auth_settings = S3::AuthSettings::loadFromConfig(config_prefix, config);

-        auto endpoint = config.getString(config_prefix + ".endpoint");
+        String endpoint = macros->expand(config.getString(config_prefix + ".endpoint"));
         auto new_uri = S3::URI{endpoint};

         {
@@ -261,9 +262,9 @@ void KeeperSnapshotManagerS3::uploadSnapshot(const std::string & path, bool asyn
     uploadSnapshotImpl(path);
 }

-void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config)
+void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
 {
-    updateS3Configuration(config);
+    updateS3Configuration(config, macros);
     snapshot_s3_thread = ThreadFromGlobalPool([this] { snapshotS3Thread(); });
 }


@@ -3,6 +3,8 @@
 #include "config.h"

 #include <Poco/Util/AbstractConfiguration.h>
+#include <Common/MultiVersion.h>
+#include <Common/Macros.h>

 #if USE_AWS_S3
 #include <Common/ConcurrentBoundedQueue.h>
@@ -21,10 +23,12 @@ class KeeperSnapshotManagerS3
 public:
     KeeperSnapshotManagerS3();

-    void updateS3Configuration(const Poco::Util::AbstractConfiguration & config);
+    /// 'macros' are used to substitute macros in endpoint of disks
+    void updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
     void uploadSnapshot(const std::string & path, bool async_upload = true);

-    void startup(const Poco::Util::AbstractConfiguration & config);
+    /// 'macros' are used to substitute macros in endpoint of disks
+    void startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
     void shutdown();
 private:
     using SnapshotS3Queue = ConcurrentBoundedQueue<std::string>;
@@ -56,10 +60,10 @@ class KeeperSnapshotManagerS3
 public:
     KeeperSnapshotManagerS3() = default;

-    void updateS3Configuration(const Poco::Util::AbstractConfiguration &) {}
+    void updateS3Configuration(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
     void uploadSnapshot(const std::string &, [[maybe_unused]] bool async_upload = true) {}
-    void startup(const Poco::Util::AbstractConfiguration &) {}
+    void startup(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
     void shutdown() {}
 };

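Threading `macros` through startup()/updateS3Configuration() exists so that an endpoint such as https://s3.{region}.example.com/{replica}/ can be written once in the config and expanded differently per server. The toy expand() below stands in for ClickHouse's Macros::expand(); the function name and macro values are assumptions for illustration only:

#include <iostream>
#include <map>
#include <string>

// Replace each "{name}" in the input with its value from the macros map;
// unknown macros are left untouched.
std::string expandMacros(const std::string & input, const std::map<std::string, std::string> & macros)
{
    std::string result;
    size_t pos = 0;
    while (pos < input.size())
    {
        size_t open = input.find('{', pos);
        if (open == std::string::npos)
        {
            result.append(input, pos, std::string::npos);
            break;
        }
        size_t close = input.find('}', open);
        if (close == std::string::npos)
        {
            result.append(input, pos, std::string::npos);
            break;
        }
        result.append(input, pos, open - pos);
        auto it = macros.find(input.substr(open + 1, close - open - 1));
        result += (it == macros.end()) ? input.substr(open, close - open + 1) : it->second;
        pos = close + 1;
    }
    return result;
}

int main()
{
    std::map<std::string, std::string> macros{{"region", "eu-west-1"}, {"replica", "replica01"}};
    std::cout << expandMacros("https://s3.{region}.example.com/{replica}/snapshots/", macros) << '\n';
    // -> https://s3.eu-west-1.example.com/replica01/snapshots/
}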

@@ -36,7 +36,12 @@ void TinyContext::initializeKeeperDispatcher([[maybe_unused]] bool start_async)
     if (config_ref.has("keeper_server"))
     {
         keeper_dispatcher = std::make_shared<KeeperDispatcher>();
-        keeper_dispatcher->initialize(config_ref, true, start_async);
+
+        MultiVersion<Macros>::Version macros;
+        if (config_ref.has("macros"))
+            macros = std::make_unique<Macros>(config_ref, "macros", &Poco::Logger::get("TinyContext"));
+        keeper_dispatcher->initialize(config_ref, true, start_async, macros);
     }
 }
@@ -71,7 +76,12 @@ void TinyContext::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::A
     if (!keeper_dispatcher)
         return;

-    keeper_dispatcher->updateConfiguration(config_);
+    MultiVersion<Macros>::Version macros;
+    if (config_.has("macros"))
+        macros = std::make_unique<Macros>(config_, "macros", &Poco::Logger::get("TinyContext"));
+    keeper_dispatcher->updateConfiguration(config_, macros);
 }
 }


@@ -602,34 +602,6 @@ void BaseDaemon::closeFDs()
     }
 }

-namespace
-{
-/// In debug version on Linux, increase oom score so that clickhouse is killed
-/// first, instead of some service. Use a carefully chosen random score of 555:
-/// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore
-/// whatever errors that occur, because it's just a debugging aid and we don't
-/// care if it breaks.
-#if defined(OS_LINUX) && !defined(NDEBUG)
-void debugIncreaseOOMScore()
-{
-    const std::string new_score = "555";
-    try
-    {
-        DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
-        buf.write(new_score.c_str(), new_score.size());
-        buf.close();
-    }
-    catch (const Poco::Exception & e)
-    {
-        LOG_WARNING(&Poco::Logger::root(), "Failed to adjust OOM score: '{}'.", e.displayText());
-        return;
-    }
-    LOG_INFO(&Poco::Logger::root(), "Set OOM score adjustment to {}", new_score);
-}
-#else
-void debugIncreaseOOMScore() {}
-#endif
-}

 void BaseDaemon::initialize(Application & self)
 {
@@ -796,7 +768,6 @@ void BaseDaemon::initialize(Application & self)

     initializeTerminationAndSignalProcessing();
     logRevision();
-    debugIncreaseOOMScore();

     for (const auto & key : DB::getMultipleKeysFromConfig(config(), "", "graphite"))
     {


@@ -7,10 +7,6 @@ add_library (daemon
     GitHash.generated.cpp
 )

-if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
-    target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
-endif()
-
 target_link_libraries (daemon PUBLIC loggers common PRIVATE clickhouse_parsers clickhouse_common_io clickhouse_common_config)

 if (TARGET ch_contrib::sentry)


@@ -5,6 +5,7 @@
 #include <Common/Exception.h>

 #define MAX_FIXEDSTRING_SIZE 0xFFFFFF
+#define MAX_FIXEDSTRING_SIZE_WITHOUT_SUSPICIOUS 256

 namespace DB


@@ -539,11 +539,19 @@ String DatabaseOnDisk::getObjectMetadataPath(const String & object_name) const
 time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_name) const
 {
     String table_metadata_path = getObjectMetadataPath(object_name);

-    if (fs::exists(table_metadata_path))
+    try
+    {
         return FS::getModificationTime(table_metadata_path);
-    else
-        return static_cast<time_t>(0);
+    }
+    catch (const fs::filesystem_error & e)
+    {
+        if (e.code() == std::errc::no_such_file_or_directory)
+        {
+            return static_cast<time_t>(0);
+        }
+        else
+            throw;
+    }
 }

 void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const


@@ -1,36 +1,39 @@
 #include <DataTypes/DataTypeString.h>
-#include <Databases/DatabaseReplicated.h>
-#include <IO/ReadBufferFromFile.h>
-#include <IO/ReadBufferFromString.h>
-#include <IO/ReadHelpers.h>
-#include <IO/WriteHelpers.h>
-#include <Interpreters/Context.h>
-#include <Interpreters/executeQuery.h>
-#include <Parsers/queryToString.h>
+
+#include <utility>
+
+#include <Backups/IRestoreCoordination.h>
+#include <Backups/RestorerFromBackup.h>
+#include <base/chrono_io.h>
+#include <base/getFQDNOrHostName.h>
+
 #include <Common/Exception.h>
+#include <Common/Macros.h>
 #include <Common/OpenTelemetryTraceContext.h>
 #include <Common/ZooKeeper/KeeperException.h>
 #include <Common/ZooKeeper/Types.h>
 #include <Common/ZooKeeper/ZooKeeper.h>
+#include <Databases/DatabaseReplicated.h>
 #include <Databases/DatabaseReplicatedWorker.h>
-#include <Interpreters/DDLTask.h>
-#include <Interpreters/executeDDLQueryOnCluster.h>
+#include <Databases/DDLDependencyVisitor.h>
+#include <Databases/TablesDependencyGraph.h>
 #include <Interpreters/Cluster.h>
-#include <base/getFQDNOrHostName.h>
+#include <Interpreters/Context.h>
+#include <Interpreters/DDLTask.h>
+#include <Interpreters/evaluateConstantExpression.h>
+#include <Interpreters/executeDDLQueryOnCluster.h>
+#include <Interpreters/executeQuery.h>
+#include <Interpreters/InterpreterCreateQuery.h>
+#include <IO/ReadBufferFromFile.h>
+#include <IO/ReadBufferFromString.h>
+#include <IO/ReadHelpers.h>
+#include <IO/WriteHelpers.h>
 #include <Parsers/ASTAlterQuery.h>
 #include <Parsers/ASTDropQuery.h>
 #include <Parsers/ASTFunction.h>
-#include <Parsers/ParserCreateQuery.h>
-#include <Parsers/parseQuery.h>
-#include <Interpreters/InterpreterCreateQuery.h>
-#include <Interpreters/evaluateConstantExpression.h>
 #include <Parsers/formatAST.h>
-#include <Backups/IRestoreCoordination.h>
-#include <Backups/RestorerFromBackup.h>
-#include <Common/Macros.h>
-#include <base/chrono_io.h>
-#include <utility>
+#include <Parsers/parseQuery.h>
+#include <Parsers/ParserCreateQuery.h>
+#include <Parsers/queryToString.h>

 namespace DB
 {
@@ -905,31 +908,37 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
     for (const auto & id : dropped_tables)
         DatabaseCatalog::instance().waitTableFinallyDropped(id);

-    /// FIXME: Use proper dependency calculation instead of just moving MV to the end
-    using NameToMetadata = std::pair<String, String>;
-    std::vector<NameToMetadata> table_name_to_metadata_sorted;
-    table_name_to_metadata_sorted.reserve(table_name_to_metadata.size());
-    std::move(table_name_to_metadata.begin(), table_name_to_metadata.end(), std::back_inserter(table_name_to_metadata_sorted));
-    std::sort(table_name_to_metadata_sorted.begin(), table_name_to_metadata_sorted.end(), [](const NameToMetadata & lhs, const NameToMetadata & rhs) -> bool
-    {
-        const bool is_materialized_view_lhs = lhs.second.find("MATERIALIZED VIEW") != std::string::npos;
-        const bool is_materialized_view_rhs = rhs.second.find("MATERIALIZED VIEW") != std::string::npos;
-        return is_materialized_view_lhs < is_materialized_view_rhs;
-    });
-
-    for (const auto & name_and_meta : table_name_to_metadata_sorted)
+    /// Create all needed tables in a proper order
+    TablesDependencyGraph tables_dependencies("DatabaseReplicated (" + getDatabaseName() + ")");
+    for (const auto & [table_name, create_table_query] : table_name_to_metadata)
     {
-        if (isTableExist(name_and_meta.first, getContext()))
+        /// Note that table_name could contain a dot inside (e.g. .inner.1234-1234-1234-1234)
+        /// And QualifiedTableName::parseFromString doesn't handle this.
+        auto qualified_name = QualifiedTableName{.database = getDatabaseName(), .table = table_name};
+        auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_table_query);
+        tables_dependencies.addDependencies(qualified_name, getDependenciesFromCreateQuery(getContext(), qualified_name, query_ast));
+    }
+
+    tables_dependencies.checkNoCyclicDependencies();
+    auto tables_to_create = tables_dependencies.getTablesSortedByDependency();
+
+    for (const auto & table_id : tables_to_create)
+    {
+        auto table_name = table_id.getTableName();
+        auto create_query_string = table_name_to_metadata[table_name];
+        if (isTableExist(table_name, getContext()))
         {
-            assert(name_and_meta.second == readMetadataFile(name_and_meta.first));
+            assert(create_query_string == readMetadataFile(table_name));
             continue;
         }

-        auto query_ast = parseQueryFromMetadataInZooKeeper(name_and_meta.first, name_and_meta.second);
+        auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_query_string);
         LOG_INFO(log, "Executing {}", serializeAST(*query_ast));
         auto create_query_context = make_query_context();
         InterpreterCreateQuery(query_ast, create_query_context).execute();
     }
+    LOG_INFO(log, "All tables are created successfully");

     if (max_log_ptr_at_creation != 0)
     {

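The old code only pushed MATERIALIZED VIEWs to the end; the new TablesDependencyGraph orders tables so that each one is created after everything it depends on. The sketch below shows the underlying idea with a plain Kahn's algorithm (illustrative only, not ClickHouse's implementation):

#include <iostream>
#include <map>
#include <queue>
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

// Topologically sort tables: every table appears after the tables it requires.
std::vector<std::string> sortByDependency(const std::map<std::string, std::set<std::string>> & deps)
{
    std::map<std::string, int> in_degree;
    std::map<std::string, std::vector<std::string>> dependents;
    for (const auto & [table, required] : deps)
    {
        in_degree.emplace(table, 0);
        for (const auto & dep : required)
        {
            ++in_degree[table];
            dependents[dep].push_back(table);
        }
    }
    std::queue<std::string> ready;
    for (const auto & [table, degree] : in_degree)
        if (degree == 0)
            ready.push(table);
    std::vector<std::string> order;
    while (!ready.empty())
    {
        auto table = ready.front();
        ready.pop();
        order.push_back(table);
        for (const auto & dependent : dependents[table])
            if (--in_degree[dependent] == 0)
                ready.push(dependent);
    }
    if (order.size() != in_degree.size())
        throw std::runtime_error("Cyclic dependency between tables");
    return order;
}

int main()
{
    // mv depends on src; dst is independent -> src is created before mv.
    std::map<std::string, std::set<std::string>> deps{{"src", {}}, {"mv", {"src"}}, {"dst", {}}};
    for (const auto & table : sortByDependency(deps))
        std::cout << table << '\n';
}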

@@ -15,7 +15,6 @@
 #include <Functions/FunctionHelpers.h>
 #include <Interpreters/castColumn.h>

-#include <Dictionaries/DictionaryFactory.h>
 #include <Dictionaries/DictionarySource.h>
@@ -1017,91 +1016,7 @@ Pipe RangeHashedDictionary<dictionary_key_type>::read(const Names & column_names
     return result;
 }

-template <DictionaryKeyType dictionary_key_type>
-static DictionaryPtr createRangeHashedDictionary(const std::string & full_name,
-    const DictionaryStructure & dict_struct,
-    const Poco::Util::AbstractConfiguration & config,
-    const std::string & config_prefix,
-    DictionarySourcePtr source_ptr)
-{
-    static constexpr auto layout_name = dictionary_key_type == DictionaryKeyType::Simple ? "range_hashed" : "complex_key_range_hashed";
-
-    if constexpr (dictionary_key_type == DictionaryKeyType::Simple)
-    {
-        if (dict_struct.key)
-            throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is not supported for dictionary of layout 'range_hashed'");
-    }
-    else
-    {
-        if (dict_struct.id)
-            throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is not supported for dictionary of layout 'complex_key_range_hashed'");
-    }
-
-    if (!dict_struct.range_min || !dict_struct.range_max)
-        throw Exception(
-            ErrorCodes::BAD_ARGUMENTS,
-            "{}: dictionary of layout '{}' requires .structure.range_min and .structure.range_max",
-            full_name,
-            layout_name);
-
-    const auto dict_id = StorageID::fromDictionaryConfig(config, config_prefix);
-    const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"};
-    const bool require_nonempty = config.getBool(config_prefix + ".require_nonempty", false);
-
-    String dictionary_layout_prefix = config_prefix + ".layout." + layout_name;
-    const bool convert_null_range_bound_to_open = config.getBool(dictionary_layout_prefix + ".convert_null_range_bound_to_open", true);
-    String range_lookup_strategy = config.getString(dictionary_layout_prefix + ".range_lookup_strategy", "min");
-    RangeHashedDictionaryLookupStrategy lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
-
-    if (range_lookup_strategy == "min")
-        lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
-    else if (range_lookup_strategy == "max")
-        lookup_strategy = RangeHashedDictionaryLookupStrategy::max;
-
-    RangeHashedDictionaryConfiguration configuration
-    {
-        .convert_null_range_bound_to_open = convert_null_range_bound_to_open,
-        .lookup_strategy = lookup_strategy,
-        .require_nonempty = require_nonempty
-    };
-
-    DictionaryPtr result = std::make_unique<RangeHashedDictionary<dictionary_key_type>>(
-        dict_id,
-        dict_struct,
-        std::move(source_ptr),
-        dict_lifetime,
-        configuration);
-
-    return result;
-}
-
-void registerDictionaryRangeHashed(DictionaryFactory & factory)
-{
-    auto create_layout_simple = [=](const std::string & full_name,
-                            const DictionaryStructure & dict_struct,
-                            const Poco::Util::AbstractConfiguration & config,
-                            const std::string & config_prefix,
-                            DictionarySourcePtr source_ptr,
-                            ContextPtr /* global_context */,
-                            bool /*created_from_ddl*/) -> DictionaryPtr
-    {
-        return createRangeHashedDictionary<DictionaryKeyType::Simple>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
-    };
-
-    factory.registerLayout("range_hashed", create_layout_simple, false);
-
-    auto create_layout_complex = [=](const std::string & full_name,
-                            const DictionaryStructure & dict_struct,
-                            const Poco::Util::AbstractConfiguration & config,
-                            const std::string & config_prefix,
-                            DictionarySourcePtr source_ptr,
-                            ContextPtr /* context */,
-                            bool /*created_from_ddl*/) -> DictionaryPtr
-    {
-        return createRangeHashedDictionary<DictionaryKeyType::Complex>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
-    };
-
-    factory.registerLayout("complex_key_range_hashed", create_layout_complex, true);
-}
+template class RangeHashedDictionary<DictionaryKeyType::Simple>;
+template class RangeHashedDictionary<DictionaryKeyType::Complex>;

 }


@@ -252,4 +252,7 @@ private:
     Arena string_arena;
 };

+extern template class RangeHashedDictionary<DictionaryKeyType::Simple>;
+extern template class RangeHashedDictionary<DictionaryKeyType::Complex>;
+
 }


@@ -0,0 +1,101 @@
+#include "RangeHashedDictionary.h"
+
+#include <Dictionaries/DictionarySource.h>
+#include <Dictionaries/DictionaryFactory.h>
+
+namespace DB
+{
+
+namespace ErrorCodes
+{
+    extern const int UNSUPPORTED_METHOD;
+    extern const int BAD_ARGUMENTS;
+}
+
+template <DictionaryKeyType dictionary_key_type>
+static DictionaryPtr createRangeHashedDictionary(const std::string & full_name,
+    const DictionaryStructure & dict_struct,
+    const Poco::Util::AbstractConfiguration & config,
+    const std::string & config_prefix,
+    DictionarySourcePtr source_ptr)
+{
+    static constexpr auto layout_name = dictionary_key_type == DictionaryKeyType::Simple ? "range_hashed" : "complex_key_range_hashed";
+
+    if constexpr (dictionary_key_type == DictionaryKeyType::Simple)
+    {
+        if (dict_struct.key)
+            throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is not supported for dictionary of layout 'range_hashed'");
+    }
+    else
+    {
+        if (dict_struct.id)
+            throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is not supported for dictionary of layout 'complex_key_range_hashed'");
+    }
+
+    if (!dict_struct.range_min || !dict_struct.range_max)
+        throw Exception(
+            ErrorCodes::BAD_ARGUMENTS,
+            "{}: dictionary of layout '{}' requires .structure.range_min and .structure.range_max",
+            full_name,
+            layout_name);
+
+    const auto dict_id = StorageID::fromDictionaryConfig(config, config_prefix);
+    const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"};
+    const bool require_nonempty = config.getBool(config_prefix + ".require_nonempty", false);
+
+    String dictionary_layout_prefix = config_prefix + ".layout." + layout_name;
+    const bool convert_null_range_bound_to_open = config.getBool(dictionary_layout_prefix + ".convert_null_range_bound_to_open", true);
+    String range_lookup_strategy = config.getString(dictionary_layout_prefix + ".range_lookup_strategy", "min");
+    RangeHashedDictionaryLookupStrategy lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
+
+    if (range_lookup_strategy == "min")
+        lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
+    else if (range_lookup_strategy == "max")
+        lookup_strategy = RangeHashedDictionaryLookupStrategy::max;
+
+    RangeHashedDictionaryConfiguration configuration
+    {
+        .convert_null_range_bound_to_open = convert_null_range_bound_to_open,
+        .lookup_strategy = lookup_strategy,
+        .require_nonempty = require_nonempty
+    };
+
+    DictionaryPtr result = std::make_unique<RangeHashedDictionary<dictionary_key_type>>(
+        dict_id,
+        dict_struct,
+        std::move(source_ptr),
+        dict_lifetime,
+        configuration);
+
+    return result;
+}
+
+void registerDictionaryRangeHashed(DictionaryFactory & factory)
+{
+    auto create_layout_simple = [=](const std::string & full_name,
+                            const DictionaryStructure & dict_struct,
+                            const Poco::Util::AbstractConfiguration & config,
+                            const std::string & config_prefix,
+                            DictionarySourcePtr source_ptr,
+                            ContextPtr /* global_context */,
+                            bool /*created_from_ddl*/) -> DictionaryPtr
+    {
+        return createRangeHashedDictionary<DictionaryKeyType::Simple>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
+    };
+
+    factory.registerLayout("range_hashed", create_layout_simple, false);
+
+    auto create_layout_complex = [=](const std::string & full_name,
+                            const DictionaryStructure & dict_struct,
+                            const Poco::Util::AbstractConfiguration & config,
+                            const std::string & config_prefix,
+                            DictionarySourcePtr source_ptr,
+                            ContextPtr /* context */,
+                            bool /*created_from_ddl*/) -> DictionaryPtr
+    {
+        return createRangeHashedDictionary<DictionaryKeyType::Complex>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
+    };
+
+    factory.registerLayout("complex_key_range_hashed", create_layout_complex, true);
+}
+
+}

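Moving the factory-registration code into its own translation unit pairs with the explicit instantiations above: the header's extern template declarations stop every including TU from re-instantiating the heavy RangeHashedDictionary template, and the .cpp provides the single compiled copy. A minimal single-file sketch of the technique (the type name is illustrative):

#include <iostream>

template <typename T>
struct Heavy
{
    T compute(T value) const { return value * value; }
};

// Header side in the real layout: suppress implicit instantiation in every
// including translation unit.
extern template struct Heavy<int>;

// Source side in the real layout: the one explicit instantiation that the
// linker will use everywhere.
template struct Heavy<int>;

int main()
{
    std::cout << Heavy<int>{}.compute(7) << '\n'; // 49
}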

@@ -4,6 +4,7 @@
 #include <Disks/ObjectStorages/MetadataStorageFromDisk.h>
 #include <Disks/DiskFactory.h>
 #include <Storages/HDFS/HDFSCommon.h>
+#include <Common/Macros.h>

 namespace DB
 {
@@ -22,7 +23,8 @@ void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check)
         ContextPtr context,
         const DisksMap & /*map*/) -> DiskPtr
     {
-        String uri{config.getString(config_prefix + ".endpoint")};
+        String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
+        String uri{endpoint};
         checkHDFSURL(uri);

         if (uri.back() != '/')


@@ -1,5 +1,7 @@
 #include <Disks/ObjectStorages/S3/S3ObjectStorage.h>

 #include <Common/ProfileEvents.h>
+#include <Interpreters/Context.h>

 #if USE_AWS_S3
@@ -31,6 +33,7 @@
 #include <Common/StringUtils/StringUtils.h>
 #include <Common/logger_useful.h>
 #include <Common/MultiVersion.h>
+#include <Common/Macros.h>

 namespace ProfileEvents
@@ -634,10 +637,11 @@ std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(
 {
     auto new_s3_settings = getSettings(config, config_prefix, context);
     auto new_client = getClient(config, config_prefix, context, *new_s3_settings);
+    String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
     return std::make_unique<S3ObjectStorage>(
         std::move(new_client), std::move(new_s3_settings),
         version_id, s3_capabilities, new_namespace,
-        config.getString(config_prefix + ".endpoint"));
+        endpoint);
 }

 }


@@ -21,6 +21,7 @@
 #include <Disks/ObjectStorages/S3/ProxyResolverConfiguration.h>
 #include <Disks/ObjectStorages/DiskObjectStorageCommon.h>
 #include <Disks/DiskLocal.h>
+#include <Common/Macros.h>

 namespace DB
 {
@@ -121,7 +122,8 @@ std::unique_ptr<Aws::S3::S3Client> getClient(
         settings.request_settings.get_request_throttler,
         settings.request_settings.put_request_throttler);

-    S3::URI uri(config.getString(config_prefix + ".endpoint"));
+    String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
+    S3::URI uri(endpoint);
     if (uri.key.back() != '/')
         throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);


@@ -23,6 +23,7 @@
 #include <Storages/StorageS3Settings.h>
 #include <Core/ServerUUID.h>
+#include <Common/Macros.h>

 namespace DB
@@ -104,7 +105,8 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
         ContextPtr context,
         const DisksMap & /*map*/) -> DiskPtr
     {
-        S3::URI uri(config.getString(config_prefix + ".endpoint"));
+        String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
+        S3::URI uri(endpoint);
         if (uri.key.empty())
             throw Exception(ErrorCodes::BAD_ARGUMENTS, "No key in S3 uri: {}", uri.uri.toString());


@@ -5,6 +5,9 @@
 #include <Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h>
 #include <Disks/ObjectStorages/DiskObjectStorage.h>
 #include <Common/assert_cast.h>
+#include <Common/Macros.h>
+
+#include <Interpreters/Context.h>

 namespace DB
 {
@@ -23,7 +26,7 @@ void registerDiskWebServer(DiskFactory & factory, bool global_skip_access_check)
         ContextPtr context,
         const DisksMap & /*map*/) -> DiskPtr
     {
-        String uri{config.getString(config_prefix + ".endpoint")};
+        String uri = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
         bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);

         if (!uri.ends_with('/'))


@@ -432,7 +432,7 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo
                 settings.json.read_bools_as_numbers,
                 settings.json.read_objects_as_strings,
                 settings.json.read_numbers_as_strings,
-                settings.json.try_infer_objects);
+                settings.json.allow_object_type);
             break;
         default:
             break;


@@ -103,7 +103,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
     format_settings.json.validate_types_from_metadata = settings.input_format_json_validate_types_from_metadata;
     format_settings.json.validate_utf8 = settings.output_format_json_validate_utf8;
     format_settings.json_object_each_row.column_for_object_name = settings.format_json_object_each_row_column_for_object_name;
-    format_settings.json.try_infer_objects = context->getSettingsRef().allow_experimental_object_type;
+    format_settings.json.allow_object_type = context->getSettingsRef().allow_experimental_object_type;
     format_settings.null_as_default = settings.input_format_null_as_default;
     format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros;
     format_settings.parquet.row_group_size = settings.output_format_parquet_row_group_size;


@@ -161,7 +161,7 @@ struct FormatSettings
         bool try_infer_numbers_from_strings = false;
         bool validate_types_from_metadata = true;
         bool validate_utf8 = false;
-        bool try_infer_objects = false;
+        bool allow_object_type = false;
     } json;

     struct


@@ -366,7 +366,7 @@ namespace
         transformJSONTuplesAndArraysToArrays(data_types, settings, type_indexes, json_info);

         /// Convert Maps to Objects if needed.
-        if (settings.json.try_infer_objects)
+        if (settings.json.allow_object_type)
             transformMapsAndObjectsToObjects(data_types, type_indexes);

         if (settings.json.read_objects_as_strings)
@@ -716,7 +716,7 @@ namespace
     {
         if constexpr (is_json)
         {
-            if (settings.json.try_infer_objects)
+            if (settings.json.allow_object_type)
                 return std::make_shared<DataTypeObject>("json", true);
         }

         /// Empty Map is Map(Nothing, Nothing)
@@ -735,7 +735,7 @@ namespace
         transformInferredTypesIfNeededImpl<is_json>(value_types, settings, json_info);
         if (!checkIfTypesAreEqual(value_types))
         {
-            if (settings.json.try_infer_objects)
+            if (settings.json.allow_object_type)
                 return std::make_shared<DataTypeObject>("json", true);
             if (settings.json.read_objects_as_strings)
                 return std::make_shared<DataTypeString>();


@@ -112,12 +112,7 @@ endif ()

 target_link_libraries(clickhouse_functions_obj PUBLIC ${PUBLIC_LIBS} PRIVATE ${PRIVATE_LIBS})

-if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
-    # Used to forward the linking information to the final binaries such as clickhouse / unit_tests_dbms,
-    # since such information are lost after we convert to OBJECT target
-    add_library(clickhouse_functions INTERFACE)
-    target_link_libraries(clickhouse_functions INTERFACE ${OBJECT_LIBS} ${PUBLIC_LIBS} ${PRIVATE_LIBS})
-else()
-    add_library(clickhouse_functions SHARED ${OBJECT_LIBS})
-    target_link_libraries(clickhouse_functions PUBLIC ${PUBLIC_LIBS} PRIVATE ${PRIVATE_LIBS})
-endif ()
+# Used to forward the linking information to the final binaries such as clickhouse / unit_tests_dbms,
+# since such information are lost after we convert to OBJECT target
+add_library(clickhouse_functions INTERFACE)
+target_link_libraries(clickhouse_functions INTERFACE ${OBJECT_LIBS} ${PUBLIC_LIBS} ${PRIVATE_LIBS})


@@ -1,5 +1,6 @@
 #pragma once

 #include <Functions/FunctionsConversion.h>
+#include <Interpreters/parseColumnsListForTableFunction.h>

 namespace DB
 {
@@ -32,10 +33,11 @@ public:
     ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }

-    explicit CastOverloadResolverImpl(ContextPtr context_, std::optional<Diagnostic> diagnostic_, bool keep_nullable_)
+    explicit CastOverloadResolverImpl(ContextPtr context_, std::optional<Diagnostic> diagnostic_, bool keep_nullable_, const DataTypeValidationSettings & data_type_validation_settings_)
         : context(context_)
         , diagnostic(std::move(diagnostic_))
         , keep_nullable(keep_nullable_)
+        , data_type_validation_settings(data_type_validation_settings_)
     {
     }

@@ -46,19 +48,19 @@ public:
         if constexpr (internal)
             return createImpl(context, {}, false /*keep_nullable*/);

-        return createImpl(context, {}, settings_ref.cast_keep_nullable);
+        return createImpl(context, {}, settings_ref.cast_keep_nullable, DataTypeValidationSettings(settings_ref));
     }

-    static FunctionOverloadResolverPtr createImpl(ContextPtr context, std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false)
+    static FunctionOverloadResolverPtr createImpl(ContextPtr context, std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false, const DataTypeValidationSettings & data_type_validation_settings = {})
     {
         assert(!internal || !keep_nullable);
-        return std::make_unique<CastOverloadResolverImpl>(context, std::move(diagnostic), keep_nullable);
+        return std::make_unique<CastOverloadResolverImpl>(context, std::move(diagnostic), keep_nullable, data_type_validation_settings);
     }

-    static FunctionOverloadResolverPtr createImpl(std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false)
+    static FunctionOverloadResolverPtr createImpl(std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false, const DataTypeValidationSettings & data_type_validation_settings = {})
     {
         assert(!internal || !keep_nullable);
-        return std::make_unique<CastOverloadResolverImpl>(ContextPtr(), std::move(diagnostic), keep_nullable);
+        return std::make_unique<CastOverloadResolverImpl>(ContextPtr(), std::move(diagnostic), keep_nullable, data_type_validation_settings);
     }

 protected:
@@ -89,6 +91,7 @@ protected:
                 ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);

         DataTypePtr type = DataTypeFactory::instance().get(type_col->getValue<String>());
+        validateDataType(type, data_type_validation_settings);

         if constexpr (cast_type == CastType::accurateOrNull)
             return makeNullable(type);
@@ -110,6 +113,7 @@ private:
     ContextPtr context;
     std::optional<Diagnostic> diagnostic;
     bool keep_nullable;
+    DataTypeValidationSettings data_type_validation_settings;
 };

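The net effect is that CAST now runs the requested target type through the same validation as table creation, so experimental types can be gated by user settings everywhere. A hand-wavy sketch of the gating pattern (all names below are stand-ins, not the real API):

#include <iostream>
#include <stdexcept>
#include <string>

// Stand-in for DataTypeValidationSettings: which type features are allowed.
struct ValidationSettings
{
    bool allow_experimental_object_type = false;
};

// Stand-in for validateDataType(): reject gated types up front.
void validateTypeName(const std::string & type_name, const ValidationSettings & settings)
{
    if (type_name == "Object('json')" && !settings.allow_experimental_object_type)
        throw std::invalid_argument(
            "Experimental Object type is not allowed; enable allow_experimental_object_type");
}

std::string castTo(const std::string & value, const std::string & type_name, const ValidationSettings & settings)
{
    validateTypeName(type_name, settings); // the new validation step from the diff
    return value; // the conversion itself is elided in this sketch
}

int main()
{
    ValidationSettings settings;
    try
    {
        castTo("{}", "Object('json')", settings);
    }
    catch (const std::invalid_argument & e)
    {
        std::cout << e.what() << '\n';
    }
    settings.allow_experimental_object_type = true;
    std::cout << castTo("{}", "Object('json')", settings) << '\n';
}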

@@ -1343,6 +1343,30 @@ struct ToYYYYMMDDhhmmssImpl
     using FactorTransform = ZeroTransform;
 };

+struct ToDateTimeComponentsImpl
+{
+    static constexpr auto name = "toDateTimeComponents";
+
+    static inline DateLUTImpl::DateTimeComponents execute(Int64 t, const DateLUTImpl & time_zone)
+    {
+        return time_zone.toDateTimeComponents(t);
+    }
+    static inline DateLUTImpl::DateTimeComponents execute(UInt32 t, const DateLUTImpl & time_zone)
+    {
+        return time_zone.toDateTimeComponents(static_cast<DateLUTImpl::Time>(t));
+    }
+    static inline DateLUTImpl::DateTimeComponents execute(Int32 d, const DateLUTImpl & time_zone)
+    {
+        return time_zone.toDateTimeComponents(ExtendedDayNum(d));
+    }
+    static inline DateLUTImpl::DateTimeComponents execute(UInt16 d, const DateLUTImpl & time_zone)
+    {
+        return time_zone.toDateTimeComponents(DayNum(d));
+    }
+
+    using FactorTransform = ZeroTransform;
+};
+
 template <typename FromType, typename ToType, typename Transform, bool is_extended_result = false>
 struct Transformer

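toDateTimeComponents() splits a timestamp into all of its date and time parts in one pass; the four execute() overloads above merely normalize the different storage types (DateTime64, DateTime, Date32, Date) before the lookup. A rough UTC-only sketch of what is computed, using POSIX gmtime_r instead of ClickHouse's timezone LUT:

#include <ctime>
#include <iostream>

struct DateTimeComponents
{
    int year, month, day, hour, minute, second;
};

// Decompose a Unix timestamp into calendar components (UTC only here;
// ClickHouse resolves these through a per-timezone lookup table instead).
DateTimeComponents toDateTimeComponents(time_t t)
{
    struct tm parts;
    gmtime_r(&t, &parts);
    return {parts.tm_year + 1900, parts.tm_mon + 1, parts.tm_mday,
            parts.tm_hour, parts.tm_min, parts.tm_sec};
}

int main()
{
    auto c = toDateTimeComponents(0); // Unix epoch
    std::cout << c.year << '-' << c.month << '-' << c.day << ' '
              << c.hour << ':' << c.minute << ':' << c.second << '\n'; // 1970-1-1 0:0:0
}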

@@ -1,6 +1,5 @@
 #pragma once

-#include "Common/Exception.h"
 #include <cstddef>
 #include <type_traits>
@@ -43,6 +42,7 @@
 #include <Columns/ColumnStringHelpers.h>
 #include <Common/assert_cast.h>
 #include <Common/quoteString.h>
+#include <Common/Exception.h>
 #include <Core/AccurateComparison.h>
 #include <Functions/IFunctionAdaptors.h>
 #include <Functions/FunctionsMiscellaneous.h>
@@ -2278,16 +2278,19 @@ struct ToNumberMonotonicity
         /// Integer cases.

+        /// Only support types represented by native integers.
+        /// It can be extended to big integers, decimals and DateTime64 later.
+        /// By the way, NULLs are representing unbounded ranges.
+        if (!((left.isNull() || left.getType() == Field::Types::UInt64 || left.getType() == Field::Types::Int64)
+            && (right.isNull() || right.getType() == Field::Types::UInt64 || right.getType() == Field::Types::Int64)))
+            return {};
+
         const bool from_is_unsigned = type.isValueRepresentedByUnsignedInteger();
         const bool to_is_unsigned = is_unsigned_v<T>;

         const size_t size_of_from = type.getSizeOfValueInMemory();
         const size_t size_of_to = sizeof(T);

-        /// Do not support 128 bit integers and decimals for now.
-        if (size_of_from > sizeof(Int64) || which_inner_type.isDecimal())
-            return {};
-
         const bool left_in_first_half = left.isNull()
             ? from_is_unsigned
             : (left.get<Int64>() >= 0);


@@ -8,16 +8,6 @@ namespace DB

 REGISTER_FUNCTION(Hashing)
 {
-#if USE_SSL
-    factory.registerFunction<FunctionMD4>();
-    factory.registerFunction<FunctionHalfMD5>();
-    factory.registerFunction<FunctionMD5>();
-    factory.registerFunction<FunctionSHA1>();
-    factory.registerFunction<FunctionSHA224>();
-    factory.registerFunction<FunctionSHA256>();
-    factory.registerFunction<FunctionSHA384>();
-    factory.registerFunction<FunctionSHA512>();
-#endif
     factory.registerFunction<FunctionSipHash64>();
     factory.registerFunction<FunctionSipHash128>();
     factory.registerFunction<FunctionCityHash64>();


@@ -0,0 +1,27 @@
+#include "config.h"
+
+#if USE_SSL
+
+#include "FunctionsHashing.h"
+#include <Functions/FunctionFactory.h>
+
+/// SSL functions are located in the separate FunctionsHashingSSL.cpp file
+/// to lower the compilation time of FunctionsHashing.cpp
+
+namespace DB
+{
+
+REGISTER_FUNCTION(HashingSSL)
+{
+    factory.registerFunction<FunctionMD4>();
+    factory.registerFunction<FunctionHalfMD5>();
+    factory.registerFunction<FunctionMD5>();
+    factory.registerFunction<FunctionSHA1>();
+    factory.registerFunction<FunctionSHA224>();
+    factory.registerFunction<FunctionSHA256>();
+    factory.registerFunction<FunctionSHA384>();
+    factory.registerFunction<FunctionSHA512>();
+}
+
+}
+
+#endif


@@ -205,7 +205,7 @@ public:
         const String & expression_return_name_)
         : expression_actions(std::move(expression_actions_))
     {
-        /// Check that expression does not contain unusual actions that will break columnss structure.
+        /// Check that expression does not contain unusual actions that will break columns structure.
         for (const auto & action : expression_actions->getActions())
             if (action.node->type == ActionsDAG::ActionType::ARRAY_JOIN)
                 throw Exception("Expression with arrayJoin or other unusual action cannot be captured", ErrorCodes::BAD_ARGUMENTS);


@@ -210,7 +210,7 @@ public:
             throw Exception("There are no available implementations for function " "TODO(dakovalkov): add name",
                 ErrorCodes::NO_SUITABLE_FUNCTION_IMPLEMENTATION);

-        /// Statistics shouldn't rely on small columnss.
+        /// Statistics shouldn't rely on small columns.
         bool considerable = (input_rows_count > 1000);
         ColumnPtr res;
