mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-22 23:52:03 +00:00

Merge branch 'master' into fix-integration-base

commit 9756d46ad7

74  .github/workflows/master.yml  vendored
@@ -141,37 +141,6 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
SharedBuildSmokeTest:
needs: [BuilderDebShared]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/split_build_check
REPO_COPY=${{runner.temp}}/split_build_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Shared build check
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 split_build_smoke_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################

@@ -508,47 +477,6 @@ jobs:
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderDebShared:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_shared
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinClangTidy:
needs: [DockerHubPush]
runs-on: [self-hosted, builder]

@@ -968,7 +896,6 @@ jobs:
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
- BuilderDebShared
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:

@@ -3139,7 +3066,6 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- SharedBuildSmokeTest
- SQLancerTestRelease
- SQLancerTestDebug
runs-on: [self-hosted, style-checker]
79  .github/workflows/pull_request.yml  vendored
@@ -203,37 +203,6 @@ jobs:
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
SharedBuildSmokeTest:
needs: [BuilderDebShared]
runs-on: [self-hosted, style-checker]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/split_build_check
REPO_COPY=${{runner.temp}}/split_build_check/ClickHouse
REPORTS_PATH=${{runner.temp}}/reports_dir
EOF
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
- name: Download json reports
uses: actions/download-artifact@v3
with:
path: ${{ env.REPORTS_PATH }}
- name: Shared build check
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 split_build_smoke_check.py
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH"
#########################################################################################
#################################### ORDINARY BUILDS ####################################
#########################################################################################

@@ -570,47 +539,6 @@ jobs:
##########################################################################################
##################################### SPECIAL BUILDS #####################################
##########################################################################################
BuilderDebShared:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]
steps:
- name: Set envs
run: |
cat >> "$GITHUB_ENV" << 'EOF'
TEMP_PATH=${{runner.temp}}/build_check
IMAGES_PATH=${{runner.temp}}/images_path
REPO_COPY=${{runner.temp}}/build_check/ClickHouse
CACHES_PATH=${{runner.temp}}/../ccaches
BUILD_NAME=binary_shared
EOF
- name: Download changed images
uses: actions/download-artifact@v3
with:
name: changed_images
path: ${{ env.IMAGES_PATH }}
- name: Check out repository code
uses: ClickHouse/checkout@v1
with:
clear-repository: true
submodules: true
- name: Build
run: |
sudo rm -fr "$TEMP_PATH"
mkdir -p "$TEMP_PATH"
cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
cd "$REPO_COPY/tests/ci" && python3 build_check.py "$BUILD_NAME"
- name: Upload build URLs to artifacts
if: ${{ success() || failure() }}
uses: actions/upload-artifact@v3
with:
name: ${{ env.BUILD_URLS }}
path: ${{ env.TEMP_PATH }}/${{ env.BUILD_URLS }}.json
- name: Cleanup
if: always()
run: |
docker ps --quiet | xargs --no-run-if-empty docker kill ||:
docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
sudo rm -fr "$TEMP_PATH" "$CACHES_PATH"
BuilderBinClangTidy:
needs: [DockerHubPush, FastTest, StyleCheck]
runs-on: [self-hosted, builder]

@@ -1018,12 +946,10 @@ jobs:
- BuilderBinDarwin
- BuilderBinDarwinAarch64
- BuilderBinFreeBSD
# - BuilderBinGCC
- BuilderBinPPC64
- BuilderBinAmd64Compat
- BuilderBinAarch64V80Compat
- BuilderBinClangTidy
- BuilderDebShared
runs-on: [self-hosted, style-checker]
if: ${{ success() || failure() }}
steps:

@@ -2603,7 +2529,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
TestsBugfixCheck:
needs: [CheckLabels, StyleCheck]
runs-on: [self-hosted, stress-tester]
runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |

@@ -2639,7 +2565,7 @@ jobs:
python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \
--validate-bugfix --post-commit-status=file || echo 'ignore exit code'

python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv"
python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv"
- name: Cleanup
if: always()
run: |

@@ -4448,7 +4374,6 @@ jobs:
- UnitTestsMsan
- UnitTestsUBsan
- UnitTestsReleaseClang
- SharedBuildSmokeTest
- CompatibilityCheck
- IntegrationTestsFlakyCheck
- SQLancerTestRelease
34  .gitmodules  vendored
@@ -104,13 +104,13 @@
url = https://github.com/ClickHouse/aws-sdk-cpp.git
[submodule "aws-c-event-stream"]
path = contrib/aws-c-event-stream
url = https://github.com/ClickHouse/aws-c-event-stream.git
url = https://github.com/awslabs/aws-c-event-stream.git
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/ClickHouse/aws-c-common.git
[submodule "aws-checksums"]
path = contrib/aws-checksums
url = https://github.com/ClickHouse/aws-checksums.git
url = https://github.com/awslabs/aws-checksums.git
[submodule "contrib/curl"]
path = contrib/curl
url = https://github.com/curl/curl.git

@@ -294,3 +294,33 @@
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide.git
[submodule "contrib/aws-crt-cpp"]
path = contrib/aws-crt-cpp
url = https://github.com/ClickHouse/aws-crt-cpp.git
[submodule "contrib/aws-c-io"]
path = contrib/aws-c-io
url = https://github.com/ClickHouse/aws-c-io.git
[submodule "contrib/aws-c-mqtt"]
path = contrib/aws-c-mqtt
url = https://github.com/awslabs/aws-c-mqtt.git
[submodule "contrib/aws-c-auth"]
path = contrib/aws-c-auth
url = https://github.com/awslabs/aws-c-auth.git
[submodule "contrib/aws-c-cal"]
path = contrib/aws-c-cal
url = https://github.com/ClickHouse/aws-c-cal.git
[submodule "contrib/aws-c-sdkutils"]
path = contrib/aws-c-sdkutils
url = https://github.com/awslabs/aws-c-sdkutils.git
[submodule "contrib/aws-c-http"]
path = contrib/aws-c-http
url = https://github.com/awslabs/aws-c-http.git
[submodule "contrib/aws-c-s3"]
path = contrib/aws-c-s3
url = https://github.com/awslabs/aws-c-s3.git
[submodule "contrib/aws-c-compression"]
path = contrib/aws-c-compression
url = https://github.com/awslabs/aws-c-compression.git
[submodule "contrib/aws-s2n-tls"]
path = contrib/aws-s2n-tls
url = https://github.com/aws/s2n-tls.git
@@ -73,22 +73,7 @@ message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")

string (TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UC)

option(USE_STATIC_LIBRARIES "Disable to use shared libraries" ON)
# DEVELOPER ONLY.
# Faster linking if turned on.
option(SPLIT_SHARED_LIBRARIES "Keep all internal libraries as separate .so files" OFF)

if (USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
message(FATAL_ERROR "SPLIT_SHARED_LIBRARIES=1 must not be used together with USE_STATIC_LIBRARIES=1")
endif()

if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
set(BUILD_SHARED_LIBS 1 CACHE INTERNAL "")
endif ()

if (USE_STATIC_LIBRARIES)
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
endif ()
list(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)

option (ENABLE_FUZZING "Fuzzy testing using libfuzzer" OFF)

@@ -171,7 +156,7 @@ option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests"
option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
option(ENABLE_BENCHMARKS "Build all benchmark programs in 'benchmarks' subdirectories" OFF)

if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND USE_STATIC_LIBRARIES AND NOT SPLIT_SHARED_LIBRARIES AND NOT USE_MUSL)
if (OS_LINUX AND (ARCH_AMD64 OR ARCH_AARCH64) AND NOT USE_MUSL)
# Only for Linux, x86_64 or aarch64.
option(GLIBC_COMPATIBILITY "Enable compatibility with older glibc libraries." ON)
elseif(GLIBC_COMPATIBILITY)

@@ -467,22 +452,13 @@ endif ()

set (CMAKE_POSTFIX_VARIABLE "CMAKE_${CMAKE_BUILD_TYPE_UC}_POSTFIX")

if (USE_STATIC_LIBRARIES)
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT ARCH_AARCH64)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
endif ()
else ()
set (CMAKE_POSITION_INDEPENDENT_CODE ON)
# This is required for clang on Arch linux, that uses PIE by default.
# See enable-SSP-and-PIE-by-default.patch [1].
#
# [1]: https://github.com/archlinux/svntogit-packages/blob/6e681aa860e65ad46a1387081482eb875c2200f2/trunk/enable-SSP-and-PIE-by-default.patch
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie")
set (CMAKE_POSITION_INDEPENDENT_CODE OFF)
if (OS_LINUX AND NOT ARCH_AARCH64)
# Slightly more efficient code can be generated
# It's disabled for ARM because otherwise ClickHouse cannot run on Android.
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} -fno-pie")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -no-pie -Wl,-no-pie")
endif ()

if (ENABLE_TESTS)

@@ -504,10 +480,7 @@ else ()
set (CLICKHOUSE_ETC_DIR "${CMAKE_INSTALL_PREFIX}/etc")
endif ()

message (STATUS
"Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE} ;
USE_STATIC_LIBRARIES=${USE_STATIC_LIBRARIES}
SPLIT_SHARED_LIBRARIES=${SPLIT_SHARED_LIBRARIES}")
message (STATUS "Building for: ${CMAKE_SYSTEM} ${CMAKE_SYSTEM_PROCESSOR} ${CMAKE_LIBRARY_ARCHITECTURE}")

include (GNUInstallDirs)

@@ -553,7 +526,7 @@ macro (clickhouse_add_executable target)
# - _je_zone_register due to JEMALLOC_PRIVATE_NAMESPACE=je_ under OS X.
# - but jemalloc-cmake does not run private_namespace.sh
# so symbol name should be _zone_register
if (ENABLE_JEMALLOC AND USE_STATIC_LIBRARIES AND OS_DARWIN)
if (ENABLE_JEMALLOC AND OS_DARWIN)
set_property(TARGET ${target} APPEND PROPERTY LINK_OPTIONS -u_zone_register)
endif()
endif()
@@ -39,10 +39,6 @@ endif ()

target_include_directories(common PUBLIC .. "${CMAKE_CURRENT_BINARY_DIR}/..")

if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
target_link_libraries(common PUBLIC -Wl,-U,_inside_main)
endif()

target_link_libraries (common
PUBLIC
ch_contrib::cityhash
@@ -37,7 +37,7 @@ if (GLIBC_COMPATIBILITY)

target_include_directories(glibc-compatibility PRIVATE libcxxabi ${musl_arch_include_dir})

if (( NOT USE_STATIC_LIBRARIES AND NOT USE_STATIC_LIBRARIES ) OR ENABLE_OPENSSL_DYNAMIC)
if (ENABLE_OPENSSL_DYNAMIC)
target_compile_options(glibc-compatibility PRIVATE -fPIC)
endif ()
@@ -25,7 +25,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${ASAN_FLAGS}")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libasan")
endif ()
if (COMPILER_GCC)

@@ -50,7 +50,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=memory")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libmsan")
endif ()

@@ -71,7 +71,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libtsan")
endif ()
if (COMPILER_GCC)

@@ -103,7 +103,7 @@ if (SANITIZE)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined")
endif()
if (USE_STATIC_LIBRARIES AND CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libubsan")
endif ()
if (COMPILER_GCC)
15  contrib/CMakeLists.txt  vendored
@@ -115,12 +115,25 @@ endif()
add_contrib (llvm-project-cmake llvm-project)
add_contrib (libfuzzer-cmake llvm-project)
add_contrib (libxml2-cmake libxml2)
add_contrib (aws-s3-cmake

add_contrib (aws-cmake
aws
aws-c-auth
aws-c-cal
aws-c-common
aws-c-compression
aws-c-event-stream
aws-c-http
aws-c-io
aws-c-mqtt
aws-c-s3
aws-c-sdkutils
aws-s2n-tls
aws-checksums
aws-crt-cpp
aws-cmake
)

add_contrib (base64-cmake base64)
add_contrib (simdjson-cmake simdjson)
add_contrib (rapidjson-cmake rapidjson)
@@ -78,23 +78,14 @@ set(FLATBUFFERS_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/flatbuffers")
set(FLATBUFFERS_INCLUDE_DIR "${FLATBUFFERS_SRC_DIR}/include")

# set flatbuffers CMake options
if (USE_STATIC_LIBRARIES)
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
else ()
set(FLATBUFFERS_BUILD_SHAREDLIB ON CACHE BOOL "Enable the build of the flatbuffers shared library")
set(FLATBUFFERS_BUILD_FLATLIB OFF CACHE BOOL "Disable the build of the flatbuffers library")
endif ()
set(FLATBUFFERS_BUILD_FLATLIB ON CACHE BOOL "Enable the build of the flatbuffers library")
set(FLATBUFFERS_BUILD_SHAREDLIB OFF CACHE BOOL "Disable the build of the flatbuffers shared library")
set(FLATBUFFERS_BUILD_TESTS OFF CACHE BOOL "Skip flatbuffers tests")

add_subdirectory(${FLATBUFFERS_SRC_DIR} "${FLATBUFFERS_BINARY_DIR}")

add_library(_flatbuffers INTERFACE)
if(USE_STATIC_LIBRARIES)
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
else()
target_link_libraries(_flatbuffers INTERFACE flatbuffers_shared)
endif()
target_link_libraries(_flatbuffers INTERFACE flatbuffers)
target_include_directories(_flatbuffers INTERFACE ${FLATBUFFERS_INCLUDE_DIR})

# === hdfs
2  contrib/aws  vendored
@@ -1 +1 @@
Subproject commit 00b03604543367d7e310cb0993973fdcb723ea79
Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252

1  contrib/aws-c-auth  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 30df6c407e2df43bd244e2c34c9b4a4b87372bfb

1  contrib/aws-c-cal  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 85dd7664b786a389c6fb1a6f031ab4bb2282133d

2  contrib/aws-c-common  vendored
@@ -1 +1 @@
Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857
Subproject commit 324fd1d973ccb25c813aa747bf1759cfde5121c5

1  contrib/aws-c-compression  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit b517b7decd0dac30be2162f5186c250221c53aff

2  contrib/aws-c-event-stream  vendored
@@ -1 +1 @@
Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0
Subproject commit 39bfa94a14b7126bf0c1330286ef8db452d87e66

1  contrib/aws-c-http  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 2c5a2a7d5556600b9782ffa6c9d7e09964df1abc

1  contrib/aws-c-io  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 5d32c453560d0823df521a686bf7fbacde7f9be3

1  contrib/aws-c-mqtt  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 882c689561a3db1466330ccfe3b63637e0a575d3

1  contrib/aws-c-s3  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit a41255ece72a7c887bba7f9d998ca3e14f4c8a1b

1  contrib/aws-c-sdkutils  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 25bf5cf225f977c3accc6a05a0a7a181ef2a4a30

2  contrib/aws-checksums  vendored
@@ -1 +1 @@
Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65
Subproject commit 48e7c0e01479232f225c8044d76c84e74192889d
114  contrib/aws-cmake/AwsFeatureTests.cmake  Normal file
@@ -0,0 +1,114 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckCSourceRuns)

option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)

# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
# work-around, disable avx2 (and all other extensions) in mingw builds.
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
#
if (MINGW)
message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
set(USE_CPU_EXTENSIONS OFF)
endif()

if(NOT CMAKE_CROSSCOMPILING)
check_c_source_runs("
#include <stdbool.h>
bool foo(int a, int b, int *c) {
return __builtin_mul_overflow(a, b, c);
}

int main() {
int out;
if (foo(1, 2, &out)) {
return 0;
}

return 0;
}" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)

if (USE_CPU_EXTENSIONS)
check_c_source_runs("
int main() {
int foo = 42;
_mulx_u32(1, 2, &foo);
return foo != 2;
}" AWS_HAVE_MSVC_MULX)
endif()

endif()

check_c_source_compiles("
#include <Windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
int main() {
return 0;
}
#else
it's not windows desktop
#endif
" AWS_HAVE_WINAPI_DESKTOP)

check_c_source_compiles("
int main() {
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
# error \"not intel\"
#endif
return 0;
}
" AWS_ARCH_INTEL)

check_c_source_compiles("
int main() {
#if !(defined(__aarch64__) || defined(_M_ARM64))
# error \"not arm64\"
#endif
return 0;
}
" AWS_ARCH_ARM64)

check_c_source_compiles("
int main() {
#if !(defined(__arm__) || defined(_M_ARM))
# error \"not arm\"
#endif
return 0;
}
" AWS_ARCH_ARM32)

check_c_source_compiles("
int main() {
int foo = 42, bar = 24;
__asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
}" AWS_HAVE_GCC_INLINE_ASM)

check_c_source_compiles("
#include <sys/auxv.h>
int main() {
#ifdef __linux__
getauxval(AT_HWCAP);
getauxval(AT_HWCAP2);
#endif
return 0;
}" AWS_HAVE_AUXV)

string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
check_c_source_compiles("
#include <execinfo.h>
int main() {
backtrace(NULL, 0);
return 0;
}" AWS_HAVE_EXECINFO)
endif()

check_c_source_compiles("
#include <linux/if_link.h>
int main() {
return 1;
}" AWS_HAVE_LINUX_IF_LINK_H)
74  contrib/aws-cmake/AwsSIMD.cmake  Normal file
@@ -0,0 +1,74 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckCCompilerFlag)
include(CheckIncludeFile)

if (USE_CPU_EXTENSIONS)
if (MSVC)
check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "/arch:AVX2")
endif()
else()
check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "-mavx -mavx2")
endif()
endif()

cmake_push_check_state()
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")

check_c_source_compiles("
#include <immintrin.h>
#include <emmintrin.h>
#include <string.h>

int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));

_mm256_shuffle_epi8(vec, vec);
_mm256_set_epi32(1,2,3,4,5,6,7,8);
_mm256_permutevar8x32_epi32(vec, vec);

return 0;
}" HAVE_AVX2_INTRINSICS)

check_c_source_compiles("
#include <immintrin.h>
#include <string.h>

int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
return (int)_mm256_extract_epi64(vec, 2);
}" HAVE_MM256_EXTRACT_EPI64)

cmake_pop_check_state()
endif() # USE_CPU_EXTENSIONS

macro(simd_add_definition_if target definition)
if(${definition})
target_compile_definitions(${target} PRIVATE -D${definition})
endif(${definition})
endmacro(simd_add_definition_if)

# Configure private preprocessor definitions for SIMD-related features
# Does not set any processor feature codegen flags
function(simd_add_definitions target)
simd_add_definition_if(${target} HAVE_AVX2_INTRINSICS)
simd_add_definition_if(${target} HAVE_MM256_EXTRACT_EPI64)
endfunction(simd_add_definitions)

# Adds source files only if AVX2 is supported. These files will be built with
# avx2 intrinsics enabled.
# Usage: simd_add_source_avx2(target file1.c file2.c ...)
function(simd_add_source_avx2 target)
foreach(file ${ARGN})
target_sources(${target} PRIVATE ${file})
set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endforeach()
endfunction(simd_add_source_avx2)
50  contrib/aws-cmake/AwsThreadAffinity.cmake  Normal file
@@ -0,0 +1,50 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckSymbolExists)

# Check if the platform supports setting thread affinity
# (important for hitting full NIC entitlement on NUMA architectures)
function(aws_set_thread_affinity_method target)

# Non-POSIX, Android, and Apple platforms do not support thread affinity.
if (NOT UNIX OR ANDROID OR APPLE)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
return()
endif()

cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)

set(headers "pthread.h")
# BSDs put nonportable pthread declarations in a separate header.
if(CMAKE_SYSTEM_NAME MATCHES BSD)
set(headers "${headers};pthread_np.h")
endif()

# Using pthread attrs is the preferred method, but is glibc-specific.
check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
if (USE_PTHREAD_ATTR_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
return()
endif()

# This method is still nonportable, but is supported by musl and BSDs.
check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
if (USE_PTHREAD_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
return()
endif()

# If we got here, we expected thread affinity support but didn't find it.
# We still build with degraded NUMA performance, but show a warning.
message(WARNING "No supported method for setting thread affinity")
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)

cmake_pop_check_state()
endfunction()
61  contrib/aws-cmake/AwsThreadName.cmake  Normal file
@@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.

include(CheckSymbolExists)

# Check how the platform supports setting thread name
function(aws_set_thread_name_method target)

if (WINDOWS)
# On Windows we do a runtime check, instead of compile-time check
return()
elseif (APPLE)
# All Apple platforms we support have the same function, so no need for compile-time check.
return()
endif()

cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)

# The start of the test program
set(c_source_start "
#define _GNU_SOURCE
#include <pthread.h>

#if defined(__FreeBSD__) || defined(__NETBSD__)
#include <pthread_np.h>
#endif

int main() {
pthread_t thread_id;
")

# The end of the test program
set(c_source_end "}")

# pthread_setname_np() usually takes 2 args
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\");
${c_source_end}"
PTHREAD_SETNAME_TAKES_2ARGS)
if (PTHREAD_SETNAME_TAKES_2ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
return()
endif()

# But on NetBSD it takes 3!
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\", NULL);
${c_source_end}
" PTHREAD_SETNAME_TAKES_3ARGS)
if (PTHREAD_SETNAME_TAKES_3ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
return()
endif()

# And on many older/weirder platforms it's just not supported
cmake_pop_check_state()
endfunction()
376  contrib/aws-cmake/CMakeLists.txt  Normal file
@@ -0,0 +1,376 @@
set(ENABLE_AWS_S3_DEFAULT OFF)

if(ENABLE_LIBRARIES AND (OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::Crypto)
set(ENABLE_AWS_S3_DEFAULT ON)
endif()

option(ENABLE_AWS_S3 "Enable AWS S3" ${ENABLE_AWS_S3_DEFAULT})

if(ENABLE_AWS_S3)
if(NOT TARGET OpenSSL::Crypto)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK without OpenSSL")
elseif(NOT (OS_LINUX OR OS_DARWIN))
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK with platform ${CMAKE_SYSTEM_NAME}")
endif()
endif()

if(NOT ENABLE_AWS_S3)
message(STATUS "Not using AWS S3")
return()
endif()

# Utilities.
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsFeatureTests.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadAffinity.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadName.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsSIMD.cmake")

# Gather sources and options.
set(AWS_SOURCES)
set(AWS_PUBLIC_INCLUDES)
set(AWS_PRIVATE_INCLUDES)
set(AWS_PUBLIC_COMPILE_DEFS)
set(AWS_PRIVATE_COMPILE_DEFS)
set(AWS_PRIVATE_LIBS)

if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DDEBUG_BUILD")
endif()

set(ENABLE_OPENSSL_ENCRYPTION ON)
if (ENABLE_OPENSSL_ENCRYPTION)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DENABLE_OPENSSL_ENCRYPTION")
endif()

set(USE_S2N ON)
if (USE_S2N)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_S2N")
endif()

# Directories.
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-s3")

SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
SET(AWS_CHECKSUMS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_COMPRESSION_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-compression")
SET(AWS_CRT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-crt-cpp")
SET(AWS_EVENT_STREAM_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
SET(AWS_HTTP_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-http")
SET(AWS_IO_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-io")
SET(AWS_MQTT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-mqtt")
SET(AWS_S2N_TLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-s2n-tls")
SET(AWS_S3_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-s3")
SET(AWS_SDKUTILS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-sdkutils")

# aws-cpp-sdk-core
file(GLOB AWS_SDK_CORE_SRC
"${AWS_SDK_CORE_DIR}/source/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/bearer-token-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/client/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/defaults/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/cjson/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/standard/*.cpp"
"${AWS_SDK_CORE_DIR}/source/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/monitoring/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/base64/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/event/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/json/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/logging/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/stream/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/threading/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/xml/*.cpp"
)

if(OS_LINUX OR OS_DARWIN)
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/linux-shared/*.cpp")
file(GLOB AWS_SDK_CORE_PLATFORM_SRC "${AWS_SDK_CORE_DIR}/source/platform/linux-shared/*.cpp")
else()
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/*.cpp")
set(AWS_SDK_CORE_PLATFORM_SRC)
endif()

OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MAJOR=1")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MINOR=10")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_PATCH=36")

list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})

list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_SDK_CORE_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)

# aws-cpp-sdk-s3
file(GLOB AWS_SDK_S3_SRC
"${AWS_SDK_S3_DIR}/source/*.cpp"
"${AWS_SDK_S3_DIR}/source/model/*.cpp"
)

list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")

# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_AUTH_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_AUTH_DIR}/include/")

# aws-c-cal
file(GLOB AWS_CAL_SRC
"${AWS_CAL_DIR}/source/*.c"
)

if (ENABLE_OPENSSL_ENCRYPTION)
file(GLOB AWS_CAL_OS_SRC
"${AWS_CAL_DIR}/source/unix/*.c"
)
list(APPEND AWS_PRIVATE_LIBS OpenSSL::Crypto)
endif()

list(APPEND AWS_SOURCES ${AWS_CAL_SRC} ${AWS_CAL_OS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CAL_DIR}/include/")

# aws-c-event-stream
file(GLOB AWS_EVENT_STREAM_SRC
"${AWS_EVENT_STREAM_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_EVENT_STREAM_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_EVENT_STREAM_DIR}/include/")

# aws-c-common
file(GLOB AWS_COMMON_SRC
"${AWS_COMMON_DIR}/source/*.c"
"${AWS_COMMON_DIR}/source/external/*.c"
"${AWS_COMMON_DIR}/source/posix/*.c"
)

file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/generic/*.c"
)

if (AWS_ARCH_INTEL)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/intel/cpuid.c"
"${AWS_COMMON_DIR}/source/arch/intel/asm/*.c"
)
elseif (AWS_ARCH_ARM64 OR AWS_ARCH_ARM32)
if (AWS_HAVE_AUXV)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/arm/asm/*.c"
)
endif()
endif()

set(AWS_COMMON_AVX2_SRC)
if (HAVE_AVX2_INTRINSICS)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_SIMD_ENCODING")
set(AWS_COMMON_AVX2_SRC "${AWS_COMMON_DIR}/source/arch/intel/encoding_avx2.c")
set_source_files_properties(${AWS_COMMON_AVX2_SRC} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endif()

configure_file("${AWS_COMMON_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)

list(APPEND AWS_SOURCES ${AWS_COMMON_SRC} ${AWS_COMMON_ARCH_SRC} ${AWS_COMMON_AVX2_SRC})

list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_COMMON_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)

# aws-checksums
file(GLOB AWS_CHECKSUMS_SRC
"${AWS_CHECKSUMS_DIR}/source/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)

if(AWS_ARCH_INTEL AND AWS_HAVE_GCC_INLINE_ASM)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
)
endif()

if (AWS_ARCH_ARM64)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
set_source_files_properties("${AWS_CHECKSUMS_DIR}/source/arm/crc32c_arm.c" PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
elseif (AWS_ARCH_ARM32)
if (AWS_ARM32_CRC)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/asm/*.c"
)
set_source_files_properties(source/arm/crc32c_arm.c PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
endif()
endif()

list(APPEND AWS_SOURCES ${AWS_CHECKSUMS_SRC} ${AWS_CHECKSUMS_ARCH_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CHECKSUMS_DIR}/include/")

# aws-c-io
file(GLOB AWS_IO_SRC
"${AWS_IO_DIR}/source/*.c"
)

if (OS_LINUX)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/linux/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
elseif (OS_DARWIN)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/bsd/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
endif()

set(AWS_IO_TLS_SRC)
if (USE_S2N)
file(GLOB AWS_IO_TLS_SRC
"${AWS_IO_DIR}/source/s2n/*.c"
)
endif()

list(APPEND AWS_SOURCES ${AWS_IO_SRC} ${AWS_IO_OS_SRC} ${AWS_IO_TLS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_IO_DIR}/include/")

# aws-s2n-tls
if (USE_S2N)
file(GLOB AWS_S2N_TLS_SRC
"${AWS_S2N_TLS_DIR}/crypto/*.c"
"${AWS_S2N_TLS_DIR}/error/*.c"
"${AWS_S2N_TLS_DIR}/stuffer/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/kyber_r3/*.c"
"${AWS_S2N_TLS_DIR}/tls/*.c"
"${AWS_S2N_TLS_DIR}/tls/extensions/*.c"
"${AWS_S2N_TLS_DIR}/utils/*.c"
)

list(APPEND AWS_SOURCES ${AWS_S2N_TLS_SRC})

list(APPEND AWS_PRIVATE_INCLUDES
"${AWS_S2N_TLS_DIR}/"
"${AWS_S2N_TLS_DIR}/api/"
)
endif()

# aws-crt-cpp
file(GLOB AWS_CRT_SRC
"${AWS_CRT_DIR}/source/*.cpp"
"${AWS_CRT_DIR}/source/auth/*.cpp"
"${AWS_CRT_DIR}/source/crypto/*.cpp"
"${AWS_CRT_DIR}/source/endpoints/*.cpp"
"${AWS_CRT_DIR}/source/external/*.cpp"
"${AWS_CRT_DIR}/source/http/*.cpp"
"${AWS_CRT_DIR}/source/io/*.cpp"
)

list(APPEND AWS_SOURCES ${AWS_CRT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_CRT_DIR}/include/")

# aws-c-mqtt
file(GLOB AWS_MQTT_SRC
"${AWS_MQTT_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_MQTT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_MQTT_DIR}/include/")

# aws-c-http
file(GLOB AWS_HTTP_SRC
"${AWS_HTTP_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_HTTP_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_HTTP_DIR}/include/")

# aws-c-compression
file(GLOB AWS_COMPRESSION_SRC
"${AWS_COMPRESSION_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_COMPRESSION_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_COMPRESSION_DIR}/include/")

# aws-c-s3
file(GLOB AWS_S3_SRC
"${AWS_S3_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_S3_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_S3_DIR}/include/")

# aws-c-sdkutils
file(GLOB AWS_SDKUTILS_SRC
"${AWS_SDKUTILS_DIR}/source/*.c"
)

list(APPEND AWS_SOURCES ${AWS_SDKUTILS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDKUTILS_DIR}/include/")

# Add library.
add_library(_aws ${AWS_SOURCES})

target_include_directories(_aws SYSTEM BEFORE PUBLIC ${AWS_PUBLIC_INCLUDES})
target_include_directories(_aws SYSTEM BEFORE PRIVATE ${AWS_PRIVATE_INCLUDES})
target_compile_definitions(_aws PUBLIC ${AWS_PUBLIC_COMPILE_DEFS})
target_compile_definitions(_aws PRIVATE ${AWS_PRIVATE_COMPILE_DEFS})
target_link_libraries(_aws PRIVATE ${AWS_PRIVATE_LIBS})

aws_set_thread_affinity_method(_aws)
aws_set_thread_name_method(_aws)

# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws PRIVATE -g0)
endif()

add_library(ch_contrib::aws_s3 ALIAS _aws)
1  contrib/aws-crt-cpp  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit ec0bea288f451d884c0d80d534bc5c66241c39a4

1  contrib/aws-s2n-tls  vendored  Submodule
@@ -0,0 +1 @@
Subproject commit 15d534e8a9ca1eda6bacee514e37d08b4f38a526
@@ -1,122 +0,0 @@
if(NOT OS_FREEBSD)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
elseif(ENABLE_S3)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on FreeBSD")
endif()

if(NOT ENABLE_S3)
message(STATUS "Not using S3")
return()
endif()

SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core")
SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_EVENT_STREAM_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")

OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)

file(GLOB AWS_CORE_SOURCES
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
)

file(GLOB AWS_S3_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
)

file(GLOB AWS_S3_MODEL_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
)

file(GLOB AWS_EVENT_STREAM_SOURCES
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
)

file(GLOB AWS_COMMON_SOURCES
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
)

file(GLOB AWS_CHECKSUMS_SOURCES
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
)

file(GLOB S3_UNIFIED_SRC
${AWS_EVENT_STREAM_SOURCES}
${AWS_COMMON_SOURCES}
${AWS_S3_SOURCES}
${AWS_S3_MODEL_SOURCES}
${AWS_CORE_SOURCES}
)

set(S3_INCLUDES
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
"${AWS_CORE_LIBRARY_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include/"
)

add_library(_aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(_aws_s3_checksums SYSTEM PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
target_compile_definitions(_aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
set_target_properties(_aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
set_property(TARGET _aws_s3_checksums PROPERTY C_STANDARD 99)

add_library(_aws_s3 ${S3_UNIFIED_SRC})

target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(_aws_s3 SYSTEM BEFORE PUBLIC ${S3_INCLUDES})

if (TARGET OpenSSL::SSL)
target_compile_definitions(_aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
target_link_libraries(_aws_s3 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()

target_link_libraries(_aws_s3 PRIVATE _aws_s3_checksums)

# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws_s3 PRIVATE -g0)
target_compile_options (_aws_s3_checksums PRIVATE -g0)
endif()

add_library(ch_contrib::aws_s3 ALIAS _aws_s3)
@@ -139,13 +139,6 @@ if(NOT OPENSSL_NO_ASM)
endif()
endif()

if(BUILD_SHARED_LIBS)
add_definitions(-DBORINGSSL_SHARED_LIBRARY)
# Enable position-independent code globally. This is needed because
# some library targets are OBJECT libraries.
set(CMAKE_POSITION_INDEPENDENT_CODE TRUE)
endif()

set(
CRYPTO_ios_aarch64_SOURCES
@@ -63,13 +63,8 @@ SET(SRCS
"${LIBRARY_DIR}/src/lib/windows_port.c"
)

if (USE_STATIC_LIBRARIES)
add_library(_c-ares STATIC ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)
else()
add_library(_c-ares SHARED ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_BUILDING_LIBRARY)
endif()
add_library(_c-ares STATIC ${SRCS})
target_compile_definitions(_c-ares PUBLIC CARES_STATICLIB)

target_compile_definitions(_c-ares PRIVATE HAVE_CONFIG_H=1)
2  contrib/googletest  vendored
@@ -1 +1 @@
Subproject commit e7e591764baba0a0c3c9ad0014430e7a27331d16
Subproject commit 71140c3ca7a87bb1b5b9c9f1500fea8858cce344
@@ -136,11 +136,6 @@ add_library(ch_contrib::uv ALIAS _uv)
target_compile_definitions(_uv PRIVATE ${uv_defines})
target_include_directories(_uv SYSTEM PUBLIC ${SOURCE_DIR}/include PRIVATE ${SOURCE_DIR}/src)
target_link_libraries(_uv ${uv_libraries})
if (NOT USE_STATIC_LIBRARIES)
target_compile_definitions(_uv
INTERFACE USING_UV_SHARED=1
PRIVATE BUILDING_UV_SHARED=1)
endif()

if(UNIX)
# Now for some gibbering horrors from beyond the stars...
@@ -6,8 +6,6 @@ endif()

option (ENABLE_EMBEDDED_COMPILER "Enable support for 'compile_expressions' option for query execution" ${ENABLE_EMBEDDED_COMPILER_DEFAULT})

# If USE_STATIC_LIBRARIES=0 was passed to CMake, we'll still build LLVM statically to keep complexity minimal.

if (NOT ENABLE_EMBEDDED_COMPILER)
message(STATUS "Not using LLVM")
return()
@@ -1,4 +1,4 @@
if (NOT OS_FREEBSD AND NOT SPLIT_SHARED_LIBRARIES AND NOT (OS_DARWIN AND COMPILER_CLANG))
if (NOT OS_FREEBSD AND NOT (OS_DARWIN AND COMPILER_CLANG))
option (ENABLE_SENTRY "Enable Sentry" ${ENABLE_LIBRARIES})
else()
option (ENABLE_SENTRY "Enable Sentry" OFF)

@@ -51,11 +51,7 @@ endif()

add_library(_sentry ${SRCS})

if(BUILD_SHARED_LIBS)
target_compile_definitions(_sentry PRIVATE SENTRY_BUILD_SHARED)
else()
target_compile_definitions(_sentry PUBLIC SENTRY_BUILD_STATIC)
endif()
target_compile_definitions(_sentry PUBLIC SENTRY_BUILD_STATIC)

target_link_libraries(_sentry PRIVATE ch_contrib::curl pthread)
target_include_directories(_sentry PUBLIC "${SRC_DIR}/include" PRIVATE "${SRC_DIR}/src")
2  contrib/sysroot  vendored
@@ -1 +1 @@
Subproject commit 0f41651860fa4a530ecd68b93a15b8fd77397adf
Subproject commit f0081b2649b94837855f3bc7d05ef326b100bad8
@@ -2,7 +2,6 @@
"docker/packager/binary": {
"name": "clickhouse/binary-builder",
"dependent": [
"docker/test/split_build_smoke_test",
"docker/test/codebrowser"
]
},

@@ -55,10 +54,6 @@
"name": "clickhouse/stress-test",
"dependent": []
},
"docker/test/split_build_smoke_test": {
"name": "clickhouse/split-build-smoke-test",
"dependent": []
},
"docker/test/codebrowser": {
"name": "clickhouse/codebrowser",
"dependent": []
@@ -108,11 +108,6 @@ mv ./programs/clickhouse* /output
[ -x ./programs/self-extracting/clickhouse ] && mv ./programs/self-extracting/clickhouse /output
mv ./src/unit_tests_dbms /output ||: # may not exist for some binary builds

# Exclude cargo build directory since it may have some shared libraries
# (even though they are not required for the clickhouse binary)
find . -name '*.so' -not -path '*/cargo/*' -print -exec mv '{}' /output \;
find . -name '*.so.*' -not -path '*/cargo/*' -print -exec mv '{}' /output \;

prepare_combined_output () {
local OUTPUT
OUTPUT="$1"

@@ -168,7 +163,7 @@ then
)
fi

# May be set for split build or for performance test.
# May be set for performance test.
if [ "" != "$COMBINED_OUTPUT" ]
then
prepare_combined_output /output
@@ -100,12 +100,11 @@ def run_docker_image_with_env(
subprocess.check_call(cmd, shell=True)

def is_release_build(build_type, package_type, sanitizer, shared_libraries):
def is_release_build(build_type, package_type, sanitizer):
return (
build_type == ""
and package_type == "deb"
and sanitizer == ""
and not shared_libraries
)

@@ -116,7 +115,6 @@ def parse_env_variables(
package_type,
cache,
distcc_hosts,
shared_libraries,
clang_tidy,
version,
author,

@@ -218,7 +216,7 @@ def parse_env_variables(
cmake_flags.append("-DCMAKE_INSTALL_PREFIX=/usr")
cmake_flags.append("-DCMAKE_INSTALL_SYSCONFDIR=/etc")
cmake_flags.append("-DCMAKE_INSTALL_LOCALSTATEDIR=/var")
if is_release_build(build_type, package_type, sanitizer, shared_libraries):
if is_release_build(build_type, package_type, sanitizer):
cmake_flags.append("-DSPLIT_DEBUG_SYMBOLS=ON")
result.append("WITH_PERFORMANCE=1")
if is_cross_arm:

@@ -231,12 +229,10 @@ def parse_env_variables(
cmake_flags.append(f"-DCMAKE_C_COMPILER={cc}")
cmake_flags.append(f"-DCMAKE_CXX_COMPILER={cxx}")

# Create combined output archive for shared library build and for performance tests.
# Create combined output archive for performance tests.
if package_type == "coverity":
result.append("COMBINED_OUTPUT=coverity")
result.append('COVERITY_TOKEN="$COVERITY_TOKEN"')
elif shared_libraries:
result.append("COMBINED_OUTPUT=shared_build")

if sanitizer:
result.append(f"SANITIZER={sanitizer}")

@@ -285,15 +281,6 @@ def parse_env_variables(
result.append("BINARY_OUTPUT=tests")
cmake_flags.append("-DENABLE_TESTS=1")

if shared_libraries:
cmake_flags.append("-DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1")
# We can't always build utils because it requires too much space, but
# we have to build them at least in some way in CI. The shared library
# build is probably the least heavy disk-wise.
cmake_flags.append("-DENABLE_UTILS=1")
# utils are not included into clickhouse-bundle, so build everything
build_target = "all"

if clang_tidy:
cmake_flags.append("-DENABLE_CLANG_TIDY=1")
cmake_flags.append("-DENABLE_TESTS=1")

@@ -371,7 +358,6 @@ if __name__ == "__main__":
default="",
)

parser.add_argument("--shared-libraries", action="store_true")
parser.add_argument("--clang-tidy", action="store_true")
parser.add_argument("--cache", choices=("ccache", "distcc", ""), default="")
parser.add_argument(

@@ -424,7 +410,6 @@ if __name__ == "__main__":
args.package_type,
args.cache,
args.distcc_hosts,
args.shared_libraries,
args.clang_tidy,
args.version,
args.author,
@ -267,7 +267,7 @@ quit
|
||||
echo "Lost connection to server. See the logs." > description.txt
|
||||
fi
|
||||
|
||||
if grep -F --text 'Sanitizer: out-of-memory' description.txt
|
||||
if grep -E --text 'Sanitizer: (out-of-memory|failed to allocate)' description.txt
|
||||
then
|
||||
# OOM of sanitizer is not a problem we can handle - treat it as success, but preserve the description.
|
||||
task_exit_code=0
|
||||
|
@ -83,6 +83,7 @@ RUN python3 -m pip install \
|
||||
pytest \
|
||||
pytest-order==1.0.0 \
|
||||
pytest-timeout \
|
||||
pytest-random \
|
||||
pytest-xdist \
|
||||
pytest-repeat \
|
||||
pytz \
|
||||
|
@ -297,6 +297,7 @@ if not args.use_existing_tables:
|
||||
|
||||
# Let's sync the data to avoid writeback affecting performance
|
||||
os.system("sync")
|
||||
reportStageEnd("sync")
|
||||
|
||||
# By default, test all queries.
|
||||
queries_to_run = range(0, len(test_queries))
|
||||
|
@ -1,9 +0,0 @@
|
||||
# rebuild in #33610
|
||||
# docker build -t clickhouse/split-build-smoke-test .
|
||||
ARG FROM_TAG=latest
|
||||
FROM clickhouse/binary-builder:$FROM_TAG
|
||||
|
||||
COPY run.sh /run.sh
|
||||
COPY process_split_build_smoke_test_result.py /
|
||||
|
||||
CMD /run.sh
|
@ -1,64 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import logging
|
||||
import argparse
|
||||
import csv
|
||||
|
||||
RESULT_LOG_NAME = "run.log"
|
||||
|
||||
|
||||
def process_result(result_folder):
|
||||
|
||||
status = "success"
|
||||
description = "Server started and responded"
|
||||
summary = [("Smoke test", "OK")]
|
||||
with open(os.path.join(result_folder, RESULT_LOG_NAME), "r") as run_log:
|
||||
lines = run_log.read().split("\n")
|
||||
if not lines or lines[0].strip() != "OK":
|
||||
status = "failure"
|
||||
logging.info("Lines is not ok: %s", str("\n".join(lines)))
|
||||
summary = [("Smoke test", "FAIL")]
|
||||
description = "Server failed to respond, see result in logs"
|
||||
|
||||
result_logs = []
|
||||
server_log_path = os.path.join(result_folder, "clickhouse-server.log")
|
||||
stderr_log_path = os.path.join(result_folder, "stderr.log")
|
||||
client_stderr_log_path = os.path.join(result_folder, "clientstderr.log")
|
||||
|
||||
if os.path.exists(server_log_path):
|
||||
result_logs.append(server_log_path)
|
||||
|
||||
if os.path.exists(stderr_log_path):
|
||||
result_logs.append(stderr_log_path)
|
||||
|
||||
if os.path.exists(client_stderr_log_path):
|
||||
result_logs.append(client_stderr_log_path)
|
||||
|
||||
return status, description, summary, result_logs
|
||||
|
||||
|
||||
def write_results(results_file, status_file, results, status):
|
||||
with open(results_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerows(results)
|
||||
with open(status_file, "w") as f:
|
||||
out = csv.writer(f, delimiter="\t")
|
||||
out.writerow(status)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
|
||||
parser = argparse.ArgumentParser(
|
||||
description="ClickHouse script for parsing results of split build smoke test"
|
||||
)
|
||||
parser.add_argument("--in-results-dir", default="/test_output/")
|
||||
parser.add_argument("--out-results-file", default="/test_output/test_results.tsv")
|
||||
parser.add_argument("--out-status-file", default="/test_output/check_status.tsv")
|
||||
args = parser.parse_args()
|
||||
|
||||
state, description, test_results, logs = process_result(args.in_results_dir)
|
||||
logging.info("Result parsed")
|
||||
status = (state, description)
|
||||
write_results(args.out_results_file, args.out_status_file, test_results, status)
|
||||
logging.info("Result written")
|
@ -1,22 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -x
|
||||
|
||||
install_and_run_server() {
|
||||
mkdir /unpacked
|
||||
tar -xzf /package_folder/shared_build.tgz -C /unpacked --strip 1
|
||||
LD_LIBRARY_PATH=/unpacked /unpacked/clickhouse-server --config /unpacked/config/config.xml >/test_output/stderr.log 2>&1 &
|
||||
}
|
||||
|
||||
run_client() {
|
||||
for i in {1..100}; do
|
||||
sleep 1
|
||||
LD_LIBRARY_PATH=/unpacked /unpacked/clickhouse-client --query "select 'OK'" > /test_output/run.log 2> /test_output/clientstderr.log && break
|
||||
[[ $i == 100 ]] && echo 'FAIL'
|
||||
done
|
||||
}
|
||||
|
||||
install_and_run_server
|
||||
run_client
|
||||
mv /var/log/clickhouse-server/clickhouse-server.log /test_output/clickhouse-server.log
|
||||
/process_split_build_smoke_test_result.py || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
|
@ -53,6 +53,7 @@ function configure()
|
||||
local total_mem
|
||||
total_mem=$(awk '/MemTotal/ { print $(NF-1) }' /proc/meminfo) # KiB
|
||||
total_mem=$(( total_mem*1024 )) # bytes
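# Illustrative example: on a host with 32 GiB of RAM this becomes roughly 34359738368 bytes; the limits configured below are fractions of this value.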
|
||||
|
||||
# Set maximum memory usage as half of total memory (less chance of OOM).
|
||||
#
|
||||
# But not via max_server_memory_usage but via max_memory_usage_for_user,
|
||||
@ -65,16 +66,17 @@ function configure()
|
||||
# max_server_memory_usage will be hard limit, and queries that should be
|
||||
# executed regardless of memory limits will use max_memory_usage_for_user=0,
|
||||
# instead of relying on max_untracked_memory
|
||||
local max_server_mem
|
||||
max_server_mem=$((total_mem*75/100)) # 75%
|
||||
echo "Setting max_server_memory_usage=$max_server_mem"
|
||||
|
||||
max_server_memory_usage_to_ram_ratio=0.5
|
||||
echo "Setting max_server_memory_usage_to_ram_ratio to ${max_server_memory_usage_to_ram_ratio}"
|
||||
cat > /etc/clickhouse-server/config.d/max_server_memory_usage.xml <<EOL
|
||||
<clickhouse>
|
||||
<max_server_memory_usage>${max_server_mem}</max_server_memory_usage>
|
||||
<max_server_memory_usage_to_ram_ratio>${max_server_memory_usage_to_ram_ratio}</max_server_memory_usage_to_ram_ratio>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
local max_users_mem
|
||||
max_users_mem=$((total_mem*50/100)) # 50%
|
||||
max_users_mem=$((total_mem*30/100)) # 30%
|
||||
echo "Setting max_memory_usage_for_user=$max_users_mem"
|
||||
cat > /etc/clickhouse-server/users.d/max_memory_usage_for_user.xml <<EOL
|
||||
<clickhouse>
|
||||
@ -97,6 +99,13 @@ EOL
|
||||
-->
|
||||
<core_path>$PWD</core_path>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
# Let OOM killer terminate other processes before clickhouse-server:
|
||||
cat > /etc/clickhouse-server/config.d/oom_score.xml <<EOL
|
||||
<clickhouse>
|
||||
<oom_score>-1000</oom_score>
|
||||
</clickhouse>
|
||||
EOL
|
||||
|
||||
# Analyzer is not yet ready for testing
|
||||
|
@ -118,7 +118,6 @@ Builds ClickHouse in various configurations for use in further steps. You have t
|
||||
- **Compiler**: `gcc-9` or `clang-10` (or `clang-10-xx` for other architectures e.g. `clang-10-freebsd`).
|
||||
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
|
||||
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
|
||||
- **Split** `splitted` is a [split build](../development/build.md#split-build)
|
||||
- **Status**: `success` or `fail`
|
||||
- **Build log**: link to the building and files copying log, useful when build failed.
|
||||
- **Build time**.
|
||||
@ -130,7 +129,6 @@ Builds ClickHouse in various configurations for use in further steps. You have t
|
||||
- `clickhouse`: Main built binary.
|
||||
- `clickhouse-odbc-bridge`
|
||||
- `unit_tests_dbms`: GoogleTest binary with ClickHouse unit tests.
|
||||
- `shared_build.tgz`: build with shared libraries.
|
||||
- `performance.tgz`: Special package for performance tests.
|
||||
|
||||
|
||||
@ -169,16 +167,6 @@ concurrency-related errors. If it fails:
|
||||
of error.
|
||||
|
||||
|
||||
## Split Build Smoke Test
|
||||
|
||||
Checks that the server build in [split build](../development/developer-instruction.md#split-build)
|
||||
configuration can start and run simple queries. If it fails:
|
||||
|
||||
* Fix other test errors first;
|
||||
* Build the server in [split build](../development/developer-instruction.md#split-build) configuration
|
||||
locally and check whether it can start and run `select 1`.
|
||||
|
||||
|
||||
## Compatibility Check
|
||||
Checks that `clickhouse` binary runs on distributions with old libc versions. If it fails, ask a maintainer for help.
|
||||
|
||||
|
@ -621,7 +621,7 @@ CREATE TABLE example_table
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(d)
|
||||
ORDER BY d
|
||||
TTL d + INTERVAL 1 MONTH [DELETE],
|
||||
TTL d + INTERVAL 1 MONTH DELETE,
|
||||
d + INTERVAL 1 WEEK TO VOLUME 'aaa',
|
||||
d + INTERVAL 2 WEEK TO DISK 'bbb';
|
||||
```
|
||||
|
@ -6,6 +6,17 @@ sidebar_label: Data Replication
|
||||
|
||||
# Data Replication
|
||||
|
||||
:::note
|
||||
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:
|
||||
```
|
||||
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
|
||||
```
|
||||
with:
|
||||
```
|
||||
ENGINE = ReplicatedReplacingMergeTree
|
||||
```
|
||||
:::
|
||||
|
||||
Replication is only supported for tables in the MergeTree family:
|
||||
|
||||
- ReplicatedMergeTree
|
||||
@ -85,15 +96,15 @@ Example of setting the addresses of the auxiliary ZooKeeper cluster:
|
||||
</auxiliary_zookeepers>
|
||||
```
|
||||
|
||||
To store table metadata in an auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
|
||||
ReplicatedMergeTree engine as follow:
|
||||
To store table metadata in an auxiliary ZooKeeper cluster instead of the default ZooKeeper cluster, we can use SQL to create the table with
|
||||
ReplicatedMergeTree engine as follows:
|
||||
|
||||
```
|
||||
CREATE TABLE table_name ( ... ) ENGINE = ReplicatedMergeTree('zookeeper_name_configured_in_auxiliary_zookeepers:path', 'replica_name') ...
|
||||
```
|
||||
You can specify any existing ZooKeeper cluster and the system will use a directory on it for its own data (the directory is specified when creating a replicatable table).
|
||||
|
||||
If ZooKeeper isn’t set in the config file, you can’t create replicated tables, and any existing replicated tables will be read-only.
|
||||
If ZooKeeper is not set in the config file, you can’t create replicated tables, and any existing replicated tables will be read-only.
|
||||
|
||||
ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-fallback_to_stale_replicas_for_distributed_queries).
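For illustration, a minimal sketch of adjusting these two settings for a single query (the table name `dist_table` is a placeholder for any Distributed table):

``` sql
SELECT count()
FROM dist_table
SETTINGS
    max_replica_delay_for_distributed_queries = 300,        -- maximum tolerated replica lag, in seconds
    fallback_to_stale_replicas_for_distributed_queries = 1; -- allow falling back to a lagging replica
```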
|
||||
|
||||
@ -119,8 +130,23 @@ The system monitors data synchronicity on replicas and is able to recover after
|
||||
|
||||
## Creating Replicated Tables {#creating-replicated-tables}
|
||||
|
||||
:::note
|
||||
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:
|
||||
```
|
||||
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
|
||||
```
|
||||
with:
|
||||
```
|
||||
ENGINE = ReplicatedReplacingMergeTree
|
||||
```
|
||||
:::
|
||||
|
||||
The `Replicated` prefix is added to the table engine name. For example: `ReplicatedMergeTree`.
|
||||
|
||||
:::tip
|
||||
Adding `Replicated` is optional in ClickHouse Cloud, as all of the tables are replicated.
|
||||
:::
|
||||
|
||||
### Replicated\*MergeTree parameters
|
||||
|
||||
#### zoo_path
|
||||
@ -144,7 +170,7 @@ CREATE TABLE table_name
|
||||
CounterID UInt32,
|
||||
UserID UInt32,
|
||||
ver UInt16
|
||||
) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', ver)
|
||||
) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
|
||||
PARTITION BY toYYYYMM(EventDate)
|
||||
ORDER BY (CounterID, EventDate, intHash32(UserID))
|
||||
SAMPLE BY intHash32(UserID);
|
||||
@ -160,7 +186,7 @@ CREATE TABLE table_name
|
||||
EventDate DateTime,
|
||||
CounterID UInt32,
|
||||
UserID UInt32
|
||||
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192);
|
||||
) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192);
|
||||
```
|
||||
|
||||
</details>
|
||||
@ -171,7 +197,6 @@ Example:
|
||||
|
||||
``` xml
|
||||
<macros>
|
||||
<layer>05</layer>
|
||||
<shard>02</shard>
|
||||
<replica>example05-02-1</replica>
|
||||
</macros>
|
||||
@ -182,12 +207,12 @@ In this case, the path consists of the following parts:
|
||||
|
||||
`/clickhouse/tables/` is the common prefix. We recommend using exactly this one.
|
||||
|
||||
`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the example cluster uses bi-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.
|
||||
`{shard}` will be expanded to the shard identifier.
|
||||
|
||||
`table_name` is the name of the node for the table in ClickHouse Keeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it does not change after a RENAME query.
|
||||
*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name`
|
||||
|
||||
The two built-in substitutions `{database}` and `{table}` can be used, they expand into the table name and the database name respectively (unless these macros are defined in the `macros` section). So the zookeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`.
|
||||
The two built-in substitutions `{database}` and `{table}` can be used; they expand into the database name and the table name respectively (unless these macros are defined in the `macros` section). So the zookeeper path can be specified as `'/clickhouse/tables/{shard}/{database}/{table}'`.
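For instance, a table that relies only on the built-in substitutions (a sketch; the database, table, and column names are illustrative):

``` sql
CREATE TABLE db_name.table_name
(
    EventDate Date,
    UserID UInt32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/{table}', '{replica}')
PARTITION BY toYYYYMM(EventDate)
ORDER BY UserID;
```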
|
||||
Be careful with table renames when using these built-in substitutions. The path in ClickHouse Keeper cannot be changed; when the table is renamed, the macros expand into a different path, so the table refers to a path that does not exist in ClickHouse Keeper and goes into read-only mode.
|
||||
|
||||
The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.
|
||||
|
@ -9,6 +9,29 @@ slug: /en/operations/backup
|
||||
- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk)
|
||||
- [Alternatives](#alternatives)
|
||||
|
||||
## Command summary
|
||||
|
||||
```bash
|
||||
BACKUP|RESTORE
|
||||
TABLE [db.]table_name [AS [db.]table_name_in_backup]
|
||||
[PARTITION[S] partition_expr [,...]] |
|
||||
DICTIONARY [db.]dictionary_name [AS [db.]name_in_backup] |
|
||||
DATABASE database_name [AS database_name_in_backup]
|
||||
[EXCEPT TABLES ...] |
|
||||
TEMPORARY TABLE table_name [AS table_name_in_backup] |
|
||||
VIEW view_name [AS view_name_in_backup]
|
||||
ALL TEMPORARY TABLES [EXCEPT ...] |
|
||||
ALL DATABASES [EXCEPT ...] } [,...]
|
||||
[ON CLUSTER 'cluster_name']
|
||||
TO|FROM File('<path>/<filename>') | Disk('<disk_name>', '<path>/') | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')
|
||||
[SETTINGS base_backup = File('<path>/<filename>') | Disk(...) | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')]
|
||||
|
||||
```
|
||||
|
||||
:::note ALL
|
||||
`ALL` is only applicable to the `RESTORE` command.
|
||||
:::
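As a quick illustration of the summary above, a minimal backup-and-restore round trip (it assumes a backup destination disk named `backups` is configured on the server):

```sql
BACKUP TABLE test.table TO Disk('backups', '1.zip');
RESTORE TABLE test.table AS test.table_restored FROM Disk('backups', '1.zip');
```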
|
||||
|
||||
## Background
|
||||
|
||||
While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can’t just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.
|
||||
|
@ -1027,6 +1027,186 @@ Result:
|
||||
└─────────────┘
|
||||
```
|
||||
|
||||
## h3PointDistM
|
||||
|
||||
Returns the "great circle" or "haversine" distance between pairs of GeoCoord points (latitude/longitude) pairs in meters.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3PointDistM(lat1, lon1, lat2, lon2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `lat1`, `lon1` — Latitude and Longitude of point1 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `lat2`, `lon2` — Latitude and Longitude of point2 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- Haversine or great circle distance in meters.
|
||||
|
||||
Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select h3PointDistM(-10.0 ,0.0, 10.0, 0.0) as h3PointDistM;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌──────h3PointDistM─┐
|
||||
│ 2223901.039504589 │
|
||||
└───────────────────┘
|
||||
```
|
||||
|
||||
## h3PointDistKm
|
||||
|
||||
Returns the "great circle" or "haversine" distance between pairs of GeoCoord points (latitude/longitude) pairs in kilometers.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3PointDistKm(lat1, lon1, lat2, lon2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `lat1`, `lon1` — Latitude and Longitude of point1 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `lat2`, `lon2` — Latitude and Longitude of point2 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- Haversine or great circle distance in kilometers.
|
||||
|
||||
Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select h3PointDistKm(-10.0 ,0.0, 10.0, 0.0) as h3PointDistKm;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─────h3PointDistKm─┐
|
||||
│ 2223.901039504589 │
|
||||
└───────────────────┘
|
||||
```
|
||||
|
||||
## h3PointDistRads
|
||||
|
||||
Returns the "great circle" or "haversine" distance between pairs of GeoCoord points (latitude/longitude) pairs in radians.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3PointDistRads(lat1, lon1, lat2, lon2)
|
||||
```
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `lat1`, `lon1` — Latitude and Longitude of point1 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
- `lat2`, `lon2` — Latitude and Longitude of point2 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Returned values**
|
||||
|
||||
- Haversine or great circle distance in radians.
|
||||
|
||||
Type: [Float64](../../../sql-reference/data-types/float.md).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select h3PointDistRads(-10.0 ,0.0, 10.0, 0.0) as h3PointDistRads;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌────h3PointDistRads─┐
|
||||
│ 0.3490658503988659 │
|
||||
└────────────────────┘
|
||||
```
|
||||
|
||||
## h3GetRes0Indexes
|
||||
|
||||
Returns an array of all the resolution 0 H3 indexes.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3GetRes0Indexes()
|
||||
```
|
||||
|
||||
**Returned values**
|
||||
|
||||
- Array of all the resolution 0 H3 indexes.
|
||||
|
||||
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
|
||||
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
select h3GetRes0Indexes() as indexes;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─indexes─────────────────────────────────────┐
|
||||
│ [576495936675512319,576531121047601151,....]│
|
||||
└─────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## h3GetPentagonIndexes
|
||||
|
||||
Returns all the pentagon H3 indexes at the specified resolution.
|
||||
|
||||
**Syntax**
|
||||
|
||||
``` sql
|
||||
h3GetPentagonIndexes(resolution)
|
||||
```
|
||||
|
||||
**Parameter**
|
||||
|
||||
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
|
||||
|
||||
**Returned value**
|
||||
|
||||
- Array of all pentagon H3 indexes.
|
||||
|
||||
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
|
||||
|
||||
**Example**
|
||||
|
||||
Query:
|
||||
|
||||
``` sql
|
||||
SELECT h3GetPentagonIndexes(3) AS indexes;
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
┌─indexes────────────────────────────────────────────────────────┐
|
||||
│ [590112357393367039,590464201114255359,590816044835143679,...] │
|
||||
└────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## h3Line
|
||||
|
||||
Returns the line of indices between the two indices that are provided.
|
||||
|
@ -8,7 +8,7 @@ sidebar_label: Type Conversion
|
||||
|
||||
## Common Issues of Numeric Conversions
|
||||
|
||||
When you convert a value from one to another data type, you should remember that in common case, it is an unsafe operation that can lead to a data loss. A data loss can occur if you try to fit value from a larger data type to a smaller data type, or if you convert values between different data types.
|
||||
When you convert a value from one data type to another, you should remember that if you try to fit a value from a larger data type into a smaller one (for example Int64 to Int32), or convert between different data types (for example `String` to `Int`), you could lose data. Test beforehand.
|
||||
|
||||
ClickHouse has the [same behavior as C++ programs](https://en.cppreference.com/w/cpp/language/implicit_conversion).
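For example, a narrowing integer conversion silently wraps modulo the width of the target type instead of raising an error. A small sketch of the kind of loss to test for (`toInt8(300)` yields 44 and `toUInt16(-1)` yields 65535):

``` sql
SELECT toInt8(300) AS wrapped, toUInt16(-1) AS reinterpreted;
```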
|
||||
|
||||
@ -45,7 +45,7 @@ SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐
|
||||
│ -9223372036854775808 │ 32 │ 16 │ 8 │
|
||||
└──────────────────────┴─────────────┴───────────────┴─────────────┘
|
||||
@ -65,7 +65,7 @@ SELECT toInt64OrZero('123123'), toInt8OrZero('123qwe123');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐
|
||||
│ 123123 │ 0 │
|
||||
└─────────────────────────┴───────────────────────────┘
|
||||
@ -85,7 +85,7 @@ SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐
|
||||
│ 123123 │ ᴺᵁᴸᴸ │
|
||||
└─────────────────────────┴───────────────────────────┘
|
||||
@ -105,7 +105,7 @@ SELECT toInt64OrDefault('123123', cast('-1' as Int64)), toInt8OrDefault('123qwe1
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toInt64OrDefault('123123', CAST('-1', 'Int64'))─┬─toInt8OrDefault('123qwe123', CAST('-1', 'Int8'))─┐
|
||||
│ 123123 │ -1 │
|
||||
└─────────────────────────────────────────────────┴──────────────────────────────────────────────────┘
|
||||
@ -144,7 +144,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐
|
||||
│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │
|
||||
└─────────────────────┴───────────────┴────────────────┴──────────────┘
|
||||
@ -314,7 +314,7 @@ Type: [Date32](/docs/en/sql-reference/data-types/date32.md).
|
||||
SELECT toDate32('1955-01-01') AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────value─┬─toTypeName(toDate32('1925-01-01'))─┐
|
||||
│ 1955-01-01 │ Date32 │
|
||||
└────────────┴────────────────────────────────────┘
|
||||
@ -326,7 +326,7 @@ SELECT toDate32('1955-01-01') AS value, toTypeName(value);
|
||||
SELECT toDate32('1899-01-01') AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────value─┬─toTypeName(toDate32('1899-01-01'))─┐
|
||||
│ 1900-01-01 │ Date32 │
|
||||
└────────────┴────────────────────────────────────┘
|
||||
@ -338,7 +338,7 @@ SELECT toDate32('1899-01-01') AS value, toTypeName(value);
|
||||
SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──────value─┬─toTypeName(toDate32(toDate('1899-01-01')))─┐
|
||||
│ 1970-01-01 │ Date32 │
|
||||
└────────────┴────────────────────────────────────────────┘
|
||||
@ -358,7 +358,7 @@ SELECT toDate32OrZero('1899-01-01'), toDate32OrZero('');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toDate32OrZero('1899-01-01')─┬─toDate32OrZero('')─┐
|
||||
│ 1900-01-01 │ 1900-01-01 │
|
||||
└──────────────────────────────┴────────────────────┘
|
||||
@ -378,7 +378,7 @@ SELECT toDate32OrNull('1955-01-01'), toDate32OrNull('');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toDate32OrNull('1955-01-01')─┬─toDate32OrNull('')─┐
|
||||
│ 1955-01-01 │ ᴺᵁᴸᴸ │
|
||||
└──────────────────────────────┴────────────────────┘
|
||||
@ -400,7 +400,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toDate32OrDefault('1930-01-01', toDate32('2020-01-01'))─┬─toDate32OrDefault('xx1930-01-01', toDate32('2020-01-01'))─┐
|
||||
│ 1930-01-01 │ 2020-01-01 │
|
||||
└─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘
|
||||
@ -436,7 +436,7 @@ Type: [DateTime64](/docs/en/sql-reference/data-types/datetime64.md).
|
||||
SELECT toDateTime64('1955-01-01 00:00:00.000', 3) AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────────────────value─┬─toTypeName(toDateTime64('1955-01-01 00:00:00.000', 3))─┐
|
||||
│ 1955-01-01 00:00:00.000 │ DateTime64(3) │
|
||||
└─────────────────────────┴────────────────────────────────────────────────────────┘
|
||||
@ -448,7 +448,7 @@ SELECT toDateTime64('1955-01-01 00:00:00.000', 3) AS value, toTypeName(value);
|
||||
SELECT toDateTime64(1546300800.000, 3) AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────────────────value─┬─toTypeName(toDateTime64(1546300800., 3))─┐
|
||||
│ 2019-01-01 00:00:00.000 │ DateTime64(3) │
|
||||
└─────────────────────────┴──────────────────────────────────────────┘
|
||||
@ -460,7 +460,7 @@ Without the decimal point the value is still treated as Unix Timestamp in second
|
||||
SELECT toDateTime64(1546300800000, 3) AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────────────────value─┬─toTypeName(toDateTime64(1546300800000, 3))─┐
|
||||
│ 2282-12-31 00:00:00.000 │ DateTime64(3) │
|
||||
└─────────────────────────┴────────────────────────────────────────────┘
|
||||
@ -473,7 +473,7 @@ SELECT toDateTime64(1546300800000, 3) AS value, toTypeName(value);
|
||||
SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeName(value);
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────────────────value─┬─toTypeName(toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul'))─┐
|
||||
│ 2019-01-01 00:00:00.000 │ DateTime64(3, 'Asia/Istanbul') │
|
||||
└─────────────────────────┴─────────────────────────────────────────────────────────────────────┘
|
||||
@ -522,7 +522,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
|
||||
│ -1.111 │ Nullable(Decimal(9, 5)) │
|
||||
└────────┴────────────────────────────────────────────────────┘
|
||||
@ -536,7 +536,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐
|
||||
│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │
|
||||
└──────┴────────────────────────────────────────────────────┘
|
||||
@ -576,7 +576,7 @@ SELECT toDecimal32OrDefault(toString(-1.111), 5) AS val, toTypeName(val);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 5))─┐
|
||||
│ -1.111 │ Decimal(9, 5) │
|
||||
└────────┴───────────────────────────────────────────────────────┘
|
||||
@ -590,7 +590,7 @@ SELECT toDecimal32OrDefault(toString(-1.111), 2) AS val, toTypeName(val);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 2))─┐
|
||||
│ 0 │ Decimal(9, 2) │
|
||||
└─────┴───────────────────────────────────────────────────────┘
|
||||
@ -629,7 +629,7 @@ SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
|
||||
│ -1.111 │ Decimal(9, 5) │
|
||||
└────────┴────────────────────────────────────────────────────┘
|
||||
@ -643,7 +643,7 @@ SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐
|
||||
│ 0.00 │ Decimal(9, 2) │
|
||||
└──────┴────────────────────────────────────────────────────┘
|
||||
@ -661,7 +661,7 @@ When converting dates with times to numbers or vice versa, the date with time co
|
||||
|
||||
The date and date-with-time formats for the toDate/toDateTime functions are defined as follows:
|
||||
|
||||
``` text
|
||||
```response
|
||||
YYYY-MM-DD
|
||||
YYYY-MM-DD hh:mm:ss
|
||||
```
|
||||
@ -686,7 +686,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌───────────now_local─┬─now_yekat───────────┐
|
||||
│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │
|
||||
└─────────────────────┴─────────────────────┘
|
||||
@ -713,7 +713,7 @@ SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s─────────────┬─s_cut─┐
|
||||
│ foo\0\0\0\0\0 │ foo │
|
||||
└───────────────┴───────┘
|
||||
@ -727,7 +727,7 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─s──────────┬─s_cut─┐
|
||||
│ foo\0bar\0 │ foo │
|
||||
└────────────┴───────┘
|
||||
@ -755,6 +755,10 @@ This function accepts a number or date or date with time and returns a FixedStri
|
||||
|
||||
## reinterpretAsUUID
|
||||
|
||||
:::note
|
||||
In addition to the UUID functions listed here, there is dedicated [UUID function documentation](/docs/en/sql-reference/functions/uuid-functions.md).
|
||||
:::
|
||||
|
||||
Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string is padded with the necessary number of null bytes at the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.
|
||||
|
||||
**Syntax**
|
||||
@ -783,7 +787,7 @@ SELECT reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')));
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')))─┐
|
||||
│ 08090a0b-0c0d-0e0f-0001-020304050607 │
|
||||
└───────────────────────────────────────────────────────────────────────┘
|
||||
@ -803,7 +807,7 @@ SELECT uuid = uuid2;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─equals(uuid, uuid2)─┐
|
||||
│ 1 │
|
||||
└─────────────────────┘
|
||||
@ -904,7 +908,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐
|
||||
│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │
|
||||
└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
|
||||
@ -924,7 +928,7 @@ SELECT toTypeName(x) FROM t_null;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toTypeName(x)─┐
|
||||
│ Int8 │
|
||||
│ Int8 │
|
||||
@ -939,7 +943,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐
|
||||
│ Nullable(UInt16) │
|
||||
│ Nullable(UInt16) │
|
||||
@ -966,7 +970,7 @@ SELECT cast(-1, 'UInt8') as uint8;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─uint8─┐
|
||||
│ 255 │
|
||||
└───────┘
|
||||
@ -980,7 +984,7 @@ SELECT accurateCast(-1, 'UInt8') as uint8;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in column Int8 cannot be safely converted into type UInt8: While processing accurateCast(-1, 'UInt8') AS uint8.
|
||||
```
|
||||
|
||||
@ -1013,7 +1017,7 @@ SELECT toTypeName(accurateCastOrNull(5, 'UInt8'));
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toTypeName(accurateCastOrNull(5, 'UInt8'))─┐
|
||||
│ Nullable(UInt8) │
|
||||
└────────────────────────────────────────────┘
|
||||
@ -1030,7 +1034,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─uint8─┬─int8─┬─fixed_string─┐
|
||||
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
|
||||
└───────┴──────┴──────────────┘
|
||||
@ -1067,7 +1071,7 @@ SELECT toTypeName(accurateCastOrDefault(5, 'UInt8'));
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toTypeName(accurateCastOrDefault(5, 'UInt8'))─┐
|
||||
│ UInt8 │
|
||||
└───────────────────────────────────────────────┘
|
||||
@ -1087,7 +1091,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─uint8─┬─uint8_default─┬─int8─┬─int8_default─┬─fixed_string─┬─fixed_string_default─┐
|
||||
│ 0 │ 5 │ 0 │ 5 │ │ Te │
|
||||
└───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘
|
||||
@ -1134,7 +1138,7 @@ SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
|
||||
│ 2019-01-08 │ 2019-01-08 │
|
||||
└───────────────────────────┴──────────────────────────────┘
|
||||
@ -1183,7 +1187,7 @@ AS parseDateTimeBestEffort;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─parseDateTimeBestEffort─┐
|
||||
│ 2020-10-23 12:12:57 │
|
||||
└─────────────────────────┘
|
||||
@ -1198,7 +1202,7 @@ AS parseDateTimeBestEffort;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─parseDateTimeBestEffort─┐
|
||||
│ 2018-08-18 10:22:16 │
|
||||
└─────────────────────────┘
|
||||
@ -1213,7 +1217,7 @@ AS parseDateTimeBestEffort;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─parseDateTimeBestEffort─┐
|
||||
│ 2015-07-07 12:04:41 │
|
||||
└─────────────────────────┘
|
||||
@ -1228,7 +1232,7 @@ AS parseDateTimeBestEffort;
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─parseDateTimeBestEffort─┐
|
||||
│ 2018-10-23 10:12:12 │
|
||||
└─────────────────────────┘
|
||||
@ -1242,7 +1246,7 @@ SELECT parseDateTimeBestEffort('10 20:19');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─parseDateTimeBestEffort('10 20:19')─┐
|
||||
│ 2000-01-10 20:19:00 │
|
||||
└─────────────────────────────────────┘
|
||||
@ -1376,7 +1380,7 @@ SELECT toLowCardinality('1');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toLowCardinality('1')─┐
|
||||
│ 1 │
|
||||
└───────────────────────┘
|
||||
@ -1419,7 +1423,7 @@ SELECT toUnixTimestamp64Milli(dt64);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toUnixTimestamp64Milli(dt64)─┐
|
||||
│ 1568650812345 │
|
||||
└──────────────────────────────┘
|
||||
@ -1434,7 +1438,7 @@ SELECT toUnixTimestamp64Nano(dt64);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─toUnixTimestamp64Nano(dt64)─┐
|
||||
│ 1568650812345678000 │
|
||||
└─────────────────────────────┘
|
||||
@ -1474,7 +1478,7 @@ SELECT fromUnixTimestamp64Milli(i64, 'UTC');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─fromUnixTimestamp64Milli(i64, 'UTC')─┐
|
||||
│ 2009-02-13 23:31:31.011 │
|
||||
└──────────────────────────────────────┘
|
||||
@ -1510,7 +1514,7 @@ FROM numbers(3);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─formatRow('CSV', number, 'good')─┐
|
||||
│ 0,"good"
|
||||
│
|
||||
@ -1535,7 +1539,7 @@ SETTINGS format_custom_result_before_delimiter='<prefix>\n', format_custom_resul
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─formatRow('CustomSeparated', number, 'good')─┐
|
||||
│ <prefix>
|
||||
0 good
|
||||
@ -1581,7 +1585,7 @@ FROM numbers(3);
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─formatRowNoNewline('CSV', number, 'good')─┐
|
||||
│ 0,"good" │
|
||||
│ 1,"good" │
|
||||
@ -1618,7 +1622,7 @@ SELECT snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
|
||||
┌─snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC')─┐
|
||||
│ 2021-08-15 10:57:56 │
|
||||
@ -1654,7 +1658,7 @@ SELECT snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC');
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
|
||||
┌─snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC')─┐
|
||||
│ 2021-08-15 10:58:19.841 │
|
||||
@ -1689,7 +1693,7 @@ WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt SELECT dateTimeToS
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─dateTimeToSnowflake(dt)─┐
|
||||
│ 1426860702823350272 │
|
||||
└─────────────────────────┘
|
||||
@ -1723,7 +1727,7 @@ WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 SELECT
|
||||
|
||||
Result:
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─dateTime64ToSnowflake(dt64)─┐
|
||||
│ 1426860704886947840 │
|
||||
└─────────────────────────────┘
|
||||
|
@ -38,7 +38,7 @@ INSERT INTO t_uuid SELECT generateUUIDv4()
|
||||
SELECT * FROM t_uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌────────────────────────────────────x─┐
|
||||
│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │
|
||||
└──────────────────────────────────────┘
|
||||
@ -89,7 +89,7 @@ SELECT empty(generateUUIDv4());
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
```response
|
||||
┌─empty(generateUUIDv4())─┐
|
||||
│ 0 │
|
||||
└─────────────────────────┘
|
||||
@ -131,7 +131,7 @@ SELECT notEmpty(generateUUIDv4());
|
||||
|
||||
Result:
|
||||
|
||||
```text
|
||||
```response
|
||||
┌─notEmpty(generateUUIDv4())─┐
|
||||
│ 1 │
|
||||
└────────────────────────────┘
|
||||
@ -155,12 +155,56 @@ The UUID type value.
|
||||
SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────────────────────────uuid─┐
|
||||
│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │
|
||||
└──────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## toUUIDOrDefault (x,y)
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `string` — String of 36 characters or FixedString(36). [String](../../sql-reference/syntax.md#string).
|
||||
- `default` — UUID to be used as the default if the first argument cannot be converted to a UUID type. [UUID](/docs/en/sql-reference/data-types/uuid.md).
|
||||
|
||||
|
||||
``` sql
|
||||
toUUIDOrDefault(String, UUID)
|
||||
```
|
||||
|
||||
**Returned value**
|
||||
|
||||
The UUID type value.
|
||||
|
||||
**Usage examples**
|
||||
|
||||
This first example returns the first argument converted to a UUID type as it can be converted:
|
||||
|
||||
``` sql
|
||||
SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID));
|
||||
```
|
||||
```response
|
||||
┌─toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', CAST('59f0c404-5cb3-11e7-907b-a6006ad3dba0', 'UUID'))─┐
|
||||
│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │
|
||||
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
This second example returns the second argument (the provided default UUID) as the first argument cannot be converted to a UUID type:
|
||||
|
||||
```sql
|
||||
SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID));
|
||||
```
|
||||
|
||||
```response
|
||||
┌─toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', CAST('59f0c404-5cb3-11e7-907b-a6006ad3dba0', 'UUID'))─┐
|
||||
│ 59f0c404-5cb3-11e7-907b-a6006ad3dba0 │
|
||||
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## toUUIDOrNull (x)
|
||||
|
||||
It takes an argument of type String and tries to parse it into a UUID. If it fails, it returns NULL.
|
||||
@ -179,7 +223,7 @@ The Nullable(UUID) type value.
|
||||
SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─uuid─┐
|
||||
│ ᴺᵁᴸᴸ │
|
||||
└──────┘
|
||||
@ -203,7 +247,7 @@ The UUID type value.
|
||||
SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─────────────────────────────────uuid─┐
|
||||
│ 00000000-0000-0000-0000-000000000000 │
|
||||
└──────────────────────────────────────┘
|
||||
@ -236,7 +280,7 @@ SELECT
|
||||
UUIDStringToNum(uuid) AS bytes
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─uuid─────────────────────────────────┬─bytes────────────┐
|
||||
│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │
|
||||
└──────────────────────────────────────┴──────────────────┘
|
||||
@ -248,7 +292,7 @@ SELECT
|
||||
UUIDStringToNum(uuid, 2) AS bytes
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─uuid─────────────────────────────────┬─bytes────────────┐
|
||||
│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ @</a;]~!p{jTj={) │
|
||||
└──────────────────────────────────────┴──────────────────┘
|
||||
@ -281,7 +325,7 @@ SELECT
|
||||
UUIDNumToString(toFixedString(bytes, 16)) AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─bytes────────────┬─uuid─────────────────────────────────┐
|
||||
│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │
|
||||
└──────────────────┴──────────────────────────────────────┘
|
||||
@ -293,7 +337,7 @@ SELECT
|
||||
UUIDNumToString(toFixedString(bytes, 16), 2) AS uuid
|
||||
```
|
||||
|
||||
``` text
|
||||
```response
|
||||
┌─bytes────────────┬─uuid─────────────────────────────────┐
|
||||
│ @</a;]~!p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │
|
||||
└──────────────────┴──────────────────────────────────────┘
|
||||
|
@ -91,11 +91,11 @@ INSERT INTO t FORMAT TabSeparated
|
||||
|
||||
You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../../interfaces)”.
|
||||
|
||||
### Constraints
|
||||
## Constraints
|
||||
|
||||
If a table has [constraints](../../sql-reference/statements/create/table.md#constraints), their expressions are checked for each row of inserted data. If any constraint is not satisfied, the server raises an exception containing the constraint name and expression, and the query is stopped.
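A minimal sketch of this behavior (the table and constraint names are illustrative):

```sql
CREATE TABLE t_checked
(
    x UInt32,
    CONSTRAINT x_is_positive CHECK x > 0
)
ENGINE = MergeTree
ORDER BY x;

INSERT INTO t_checked VALUES (1); -- accepted
INSERT INTO t_checked VALUES (0); -- rejected with an exception naming `x_is_positive`
```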
|
||||
|
||||
### Inserting the Results of `SELECT`
|
||||
## Inserting the Results of `SELECT`
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -114,7 +114,7 @@ However, you can delete old data using `ALTER TABLE ... DROP PARTITION`.
|
||||
|
||||
To insert a default value instead of `NULL` into a column with not nullable data type, enable [insert_null_as_default](../../operations/settings/settings.md#insert_null_as_default) setting.
|
||||
|
||||
### Inserting Data from a File
|
||||
## Inserting Data from a File
|
||||
|
||||
**Syntax**
|
||||
|
||||
@ -122,14 +122,15 @@ To insert a default value instead of `NULL` into a column with not nullable data
|
||||
INSERT INTO [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
|
||||
```
|
||||
|
||||
Use the syntax above to insert data from a file stored on a **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
|
||||
Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
|
||||
|
||||
Compressed files are supported. Compression type is detected by the extension of the file name. Or it can be explicitly specified in a `COMPRESSION` clause. Supported types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.
|
||||
Compressed files are supported. The compression type is detected by the extension of the file name. Or it can be explicitly specified in a `COMPRESSION` clause. Supported types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.
|
||||
|
||||
This functionality is available in the [command-line client](../../interfaces/cli.md) and [clickhouse-local](../../operations/utilities/clickhouse-local.md).
|
||||
|
||||
**Example**
|
||||
**Examples**
|
||||
|
||||
### Single file with FROM INFILE
|
||||
Execute the following queries using [command-line client](../../interfaces/cli.md):
|
||||
|
||||
```bash
|
||||
@ -148,7 +149,27 @@ Result:
|
||||
└────┴──────┘
|
||||
```
|
||||
|
||||
### Inserting into Table Function
|
||||
### Multiple files with FROM INFILE using globs
|
||||
|
||||
This example is very similar to the previous one, but it inserts from multiple files using `FROM INFILE 'input_*.csv'`.
|
||||
|
||||
```bash
|
||||
echo 1,A > input_1.csv ; echo 2,B > input_2.csv
|
||||
clickhouse-client --query="CREATE TABLE infile_globs (id UInt32, text String) ENGINE=MergeTree() ORDER BY id;"
|
||||
clickhouse-client --query="INSERT INTO infile_globs FROM INFILE 'input_*.csv' FORMAT CSV;"
|
||||
clickhouse-client --query="SELECT * FROM infile_globs FORMAT PrettyCompact;"
|
||||
```
|
||||
|
||||
:::tip
|
||||
In addition to selecting multiple files with `*`, you can use ranges (`{1,2}` or `{1..9}`) and other [glob substitutions](/docs/en/sql-reference/table-functions/file.md/#globs-in-path). All three of these would work with the above example:
|
||||
```sql
|
||||
INSERT INTO infile_globs FROM INFILE 'input_*.csv' FORMAT CSV;
|
||||
INSERT INTO infile_globs FROM INFILE 'input_{1,2}.csv' FORMAT CSV;
|
||||
INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV;
|
||||
```
|
||||
:::
|
||||
|
||||
## Inserting into Table Function
|
||||
|
||||
Data can be inserted into tables referenced by [table functions](../../sql-reference/table-functions/index.md).
|
||||
|
||||
@ -176,7 +197,7 @@ Result:
|
||||
└─────┴───────────────────────┘
|
||||
```
|
||||
|
||||
### Performance Considerations
|
||||
## Performance Considerations
|
||||
|
||||
`INSERT` sorts the input data by primary key and splits them into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:
|
||||
|
||||
|
@ -169,12 +169,6 @@ sudo bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
|
||||
|
||||
cmake -D CMAKE_BUILD_TYPE=Debug ..
|
||||
|
||||
If your development machine has an old HDD or SSD, or if you want to use less disk space for build artifacts, you can use the following command:
|
||||
```bash
|
||||
cmake -DUSE_DEBUG_HELPERS=1 -DUSE_STATIC_LIBRARIES=0 -DSPLIT_SHARED_LIBRARIES=1 ..
|
||||
```
|
||||
Keep in mind that the executables produced by this build are dynamically linked against the libraries, so they effectively become non-portable to other machines (or making them portable takes considerably more effort than with a static build). The advantages are a much shorter build time (not on the first build, but on subsequent ones after source code changes, since less time is spent on linking compared to a static build) and much lower disk usage (more than a 3x saving compared to a static build). For development, when only debug runs on the same machine where the build was made are planned, this can be the most convenient option.
|
||||
|
||||
You can change the build variant by running a new command in the build directory.
|
||||
|
||||
Run ninja to build:
|
||||
|
@ -523,7 +523,7 @@ CREATE TABLE example_table
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(d)
|
||||
ORDER BY d
|
||||
TTL d + INTERVAL 1 MONTH [DELETE],
|
||||
TTL d + INTERVAL 1 MONTH DELETE,
|
||||
d + INTERVAL 1 WEEK TO VOLUME 'aaa',
|
||||
d + INTERVAL 2 WEEK TO DISK 'bbb';
|
||||
```
|
||||
|
@ -85,7 +85,6 @@ git push
|
||||
- **Build type**: `Debug` or `RelWithDebInfo` (cmake).
|
||||
- **Sanitizer**: `none` (without sanitizers), `address` (ASan), `memory` (MSan), `undefined` (UBSan), or `thread` (TSan).
|
||||
- **Bundled**: a `bundled` build uses libraries from `contrib`, while an `unbundled` build uses system libraries.
|
||||
- **Splitted**: `splitted` is a [split build](https://clickhouse.com/docs/en/development/build/#split-build)
|
||||
- **Status**: `成功` 或 `失败`
|
||||
- **Build log**: 链接到构建和文件复制日志, 当构建失败时很有用.
|
||||
- **Build time**.
|
||||
@ -97,7 +96,6 @@ git push
|
||||
- `clickhouse`: Main built binary.
|
||||
- `clickhouse-odbc-bridge`
|
||||
- `unit_tests_dbms`: GoogleTest binary with ClickHouse unit tests.
|
||||
- `shared_build.tgz`: build with shared libraries.
|
||||
- `performance.tgz`: special package for performance tests.
|
||||
|
||||
## Special Build Check {#special-buildcheck}
|
||||
@ -123,14 +121,6 @@ git push
|
||||
of error.
|
||||
```
|
||||
|
||||
## Split Build Smoke Test {#split-build-smoke-test}
|
||||
Checks that the server built in the [split build](./build.md#split-build) configuration can start and run simple queries. If it fails:
|
||||
```
|
||||
* Fix other test errors first;
|
||||
* Build the server in [split build](./build.md#split-build) configuration
|
||||
locally and check whether it can start and run `select 1`.
|
||||
```
|
||||
|
||||
## Compatibility Check {#compatibility-check}
|
||||
Checks that the `clickhouse` binary runs on distributions with old libc versions. If it fails, ask a maintainer for help.
|
||||
|
||||
|
@ -479,7 +479,7 @@ CREATE TABLE example_table
|
||||
ENGINE = MergeTree
|
||||
PARTITION BY toYYYYMM(d)
|
||||
ORDER BY d
|
||||
TTL d + INTERVAL 1 MONTH [DELETE],
|
||||
TTL d + INTERVAL 1 MONTH DELETE,
|
||||
d + INTERVAL 1 WEEK TO VOLUME 'aaa',
|
||||
d + INTERVAL 2 WEEK TO DISK 'bbb';
|
||||
```
|
||||
|
@ -13,12 +13,6 @@ option (ENABLE_CLICKHOUSE_SERVER "Server mode (main mode)" ${ENABLE_CLICKHOUSE_A
|
||||
option (ENABLE_CLICKHOUSE_CLIENT "Client mode (interactive tui/shell that connects to the server)"
|
||||
${ENABLE_CLICKHOUSE_ALL})
|
||||
|
||||
# Don't create self-extracting clickhouse for split build
|
||||
if (ENABLE_CLICKHOUSE_SELF_EXTRACTING AND SPLIT_SHARED_LIBRARIES)
|
||||
message (STATUS "Self-extracting on split build is not supported")
|
||||
unset (ENABLE_CLICKHOUSE_SELF_EXTRACTING CACHE)
|
||||
endif ()
|
||||
|
||||
# https://clickhouse.com/docs/en/operations/utilities/clickhouse-local/
|
||||
option (ENABLE_CLICKHOUSE_LOCAL "Local files fast processing mode" ${ENABLE_CLICKHOUSE_ALL})
|
||||
|
||||
@ -173,10 +167,6 @@ else()
|
||||
message(STATUS "ClickHouse keeper-converter mode: OFF")
|
||||
endif()
|
||||
|
||||
if(NOT (USE_STATIC_LIBRARIES OR SPLIT_SHARED_LIBRARIES))
|
||||
set(CLICKHOUSE_ONE_SHARED ON)
|
||||
endif()
|
||||
|
||||
if (ENABLE_CLICKHOUSE_DISKS)
|
||||
message(STATUS "Clickhouse disks mode: ON")
|
||||
else()
|
||||
@ -192,11 +182,7 @@ endif()
|
||||
configure_file (config_tools.h.in ${CONFIG_INCLUDE_PATH}/config_tools.h)
|
||||
|
||||
macro(clickhouse_target_link_split_lib target name)
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
|
||||
else()
|
||||
target_link_libraries(${target} PRIVATE clickhouse-lib)
|
||||
endif()
|
||||
target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
|
||||
endmacro()
|
||||
|
||||
macro(clickhouse_program_add_library name)
|
||||
@ -208,18 +194,16 @@ macro(clickhouse_program_add_library name)
|
||||
set(CLICKHOUSE_${name_uc}_LINK ${CLICKHOUSE_${name_uc}_LINK} PARENT_SCOPE)
|
||||
set(CLICKHOUSE_${name_uc}_INCLUDE ${CLICKHOUSE_${name_uc}_INCLUDE} PARENT_SCOPE)
|
||||
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES})
|
||||
add_library(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_SOURCES})
|
||||
|
||||
set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if()
|
||||
if(_link)
|
||||
target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK})
|
||||
endif()
|
||||
set(_link ${CLICKHOUSE_${name_uc}_LINK}) # can't use ${} in if()
|
||||
if(_link)
|
||||
target_link_libraries(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_LINK})
|
||||
endif()
|
||||
|
||||
set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if()
|
||||
if (_include)
|
||||
target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE})
|
||||
endif()
|
||||
set(_include ${CLICKHOUSE_${name_uc}_INCLUDE}) # can't use ${} in if()
|
||||
if (_include)
|
||||
target_include_directories(clickhouse-${name}-lib ${CLICKHOUSE_${name_uc}_INCLUDE})
|
||||
endif()
|
||||
endmacro()
|
||||
|
||||
@ -263,68 +247,8 @@ if (ENABLE_CLICKHOUSE_SELF_EXTRACTING)
|
||||
add_subdirectory (self-extracting)
|
||||
endif ()
|
||||
|
||||
if (CLICKHOUSE_ONE_SHARED)
|
||||
add_library(clickhouse-lib SHARED
|
||||
${CLICKHOUSE_SERVER_SOURCES}
|
||||
${CLICKHOUSE_CLIENT_SOURCES}
|
||||
${CLICKHOUSE_LOCAL_SOURCES}
|
||||
${CLICKHOUSE_BENCHMARK_SOURCES}
|
||||
${CLICKHOUSE_COPIER_SOURCES}
|
||||
${CLICKHOUSE_EXTRACT_FROM_CONFIG_SOURCES}
|
||||
${CLICKHOUSE_COMPRESSOR_SOURCES}
|
||||
${CLICKHOUSE_FORMAT_SOURCES}
|
||||
${CLICKHOUSE_OBFUSCATOR_SOURCES}
|
||||
${CLICKHOUSE_GIT_IMPORT_SOURCES}
|
||||
${CLICKHOUSE_ODBC_BRIDGE_SOURCES}
|
||||
${CLICKHOUSE_KEEPER_SOURCES}
|
||||
${CLICKHOUSE_KEEPER_CONVERTER_SOURCES}
|
||||
${CLICKHOUSE_STATIC_FILES_DISK_UPLOADER_SOURCES}
|
||||
${CLICKHOUSE_SU_SOURCES})
|
||||
|
||||
target_link_libraries(clickhouse-lib
|
||||
${CLICKHOUSE_SERVER_LINK}
|
||||
${CLICKHOUSE_CLIENT_LINK}
|
||||
${CLICKHOUSE_LOCAL_LINK}
|
||||
${CLICKHOUSE_BENCHMARK_LINK}
|
||||
${CLICKHOUSE_COPIER_LINK}
|
||||
${CLICKHOUSE_EXTRACT_FROM_CONFIG_LINK}
|
||||
${CLICKHOUSE_COMPRESSOR_LINK}
|
||||
${CLICKHOUSE_FORMAT_LINK}
|
||||
${CLICKHOUSE_OBFUSCATOR_LINK}
|
||||
${CLICKHOUSE_GIT_IMPORT_LINK}
|
||||
${CLICKHOUSE_ODBC_BRIDGE_LINK}
|
||||
${CLICKHOUSE_KEEPER_LINK}
|
||||
${CLICKHOUSE_KEEPER_CONVERTER_LINK}
|
||||
${CLICKHOUSE_STATIC_FILES_DISK_UPLOADER_LINK}
|
||||
${CLICKHOUSE_SU_LINK})
|
||||
|
||||
target_include_directories(clickhouse-lib
|
||||
${CLICKHOUSE_SERVER_INCLUDE}
|
||||
${CLICKHOUSE_CLIENT_INCLUDE}
|
||||
${CLICKHOUSE_LOCAL_INCLUDE}
|
||||
${CLICKHOUSE_BENCHMARK_INCLUDE}
|
||||
${CLICKHOUSE_COPIER_INCLUDE}
|
||||
${CLICKHOUSE_EXTRACT_FROM_CONFIG_INCLUDE}
|
||||
${CLICKHOUSE_COMPRESSOR_INCLUDE}
|
||||
${CLICKHOUSE_FORMAT_INCLUDE}
|
||||
${CLICKHOUSE_OBFUSCATOR_INCLUDE}
|
||||
${CLICKHOUSE_GIT_IMPORT_INCLUDE}
|
||||
${CLICKHOUSE_ODBC_BRIDGE_INCLUDE}
|
||||
${CLICKHOUSE_KEEPER_INCLUDE}
|
||||
${CLICKHOUSE_KEEPER_CONVERTER_INCLUDE})
|
||||
|
||||
set_target_properties(clickhouse-lib PROPERTIES SOVERSION ${VERSION_MAJOR}.${VERSION_MINOR} VERSION ${VERSION_SO} OUTPUT_NAME clickhouse DEBUG_POSTFIX "")
|
||||
install (TARGETS clickhouse-lib LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT clickhouse)
|
||||
endif()
|
||||
|
||||
clickhouse_add_executable (clickhouse main.cpp)
|
||||
|
||||
if (NOT USE_STATIC_LIBRARIES AND SPLIT_SHARED_LIBRARIES)
|
||||
# Shared split (dev) build: In CI, the server is run with custom LD_LIBRARY_PATH. This makes the harmful env check re-execute the
|
||||
# process in a clean environment but as in CI the containing directory is not included in DT_RUNPATH/DT_RPATH, the server won't come up.
|
||||
target_compile_definitions(clickhouse PRIVATE DISABLE_HARMFUL_ENV_VAR_CHECK)
|
||||
endif ()
|
||||
|
||||
# A library that prevent usage of several functions from libc.
|
||||
if (ARCH_AMD64 AND OS_LINUX AND NOT OS_ANDROID)
|
||||
set (HARMFUL_LIB harmful)
|
||||
|
@ -10,6 +10,4 @@ set (CLICKHOUSE_BENCHMARK_LINK
|
||||
|
||||
clickhouse_program_add(benchmark)
|
||||
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib)
|
||||
endif()
|
||||
target_link_libraries (clickhouse-benchmark-lib PRIVATE clickhouse-client-lib)
|
||||
|
@ -1,12 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Helper for split build mode.
|
||||
# Allows to run commands like
|
||||
# clickhouse client
|
||||
# clickhouse server
|
||||
# ...
|
||||
|
||||
set -e
|
||||
CMD=$1
|
||||
shift
|
||||
clickhouse-$CMD $*
|
@ -14,9 +14,7 @@ set (CLICKHOUSE_LOCAL_LINK
|
||||
|
||||
clickhouse_program_add(local)
|
||||
|
||||
if(NOT CLICKHOUSE_ONE_SHARED)
|
||||
target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
|
||||
endif()
|
||||
target_link_libraries(clickhouse-local-lib PRIVATE clickhouse-server-lib)
|
||||
|
||||
if (TARGET ch_rust::skim)
|
||||
target_link_libraries(clickhouse-local-lib PRIVATE ch_rust::skim)
|
||||
|
@ -345,7 +345,7 @@ struct Checker
|
||||
;
|
||||
|
||||
|
||||
#if !defined(DISABLE_HARMFUL_ENV_VAR_CHECK) && !defined(USE_MUSL)
|
||||
#if !defined(USE_MUSL)
|
||||
/// NOTE: We will migrate to full static linking or our own dynamic loader to make this code obsolete.
|
||||
void checkHarmfulEnvironmentVariables(char ** argv)
|
||||
{
|
||||
@ -457,7 +457,7 @@ int main(int argc_, char ** argv_)
|
||||
/// Note: we forbid dlopen in our code.
|
||||
updatePHDRCache();
|
||||
|
||||
#if !defined(DISABLE_HARMFUL_ENV_VAR_CHECK) && !defined(USE_MUSL)
|
||||
#if !defined(USE_MUSL)
|
||||
checkHarmfulEnvironmentVariables(argv_);
|
||||
#endif
|
||||
|
||||
|
@ -420,6 +420,33 @@ void Server::createServer(
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#if defined(OS_LINUX)
|
||||
namespace
|
||||
{
|
||||
|
||||
void setOOMScore(int value, Poco::Logger * log)
|
||||
{
|
||||
try
|
||||
{
|
||||
std::string value_string = std::to_string(value);
|
||||
DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
|
||||
buf.write(value_string.c_str(), value_string.size());
|
||||
buf.next();
|
||||
buf.close();
|
||||
}
|
||||
catch (const Poco::Exception & e)
|
||||
{
|
||||
LOG_WARNING(log, "Failed to adjust OOM score: '{}'.", e.displayText());
|
||||
return;
|
||||
}
|
||||
LOG_INFO(log, "Set OOM score adjustment to {}", value);
|
||||
}
|
||||
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
void Server::uninitialize()
|
||||
{
|
||||
logger().information("shutting down");
|
||||
@ -881,6 +908,21 @@ try
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int default_oom_score = 0;
|
||||
|
||||
#if !defined(NDEBUG)
|
||||
/// In debug version on Linux, increase oom score so that clickhouse is killed
|
||||
/// first, instead of some service. Use a carefully chosen random score of 555:
|
||||
/// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore
|
||||
/// whatever errors that occur, because it's just a debugging aid and we don't
|
||||
/// care if it breaks.
|
||||
default_oom_score = 555;
|
||||
#endif
|
||||
|
||||
int oom_score = config().getInt("oom_score", default_oom_score);
|
||||
if (oom_score)
|
||||
setOOMScore(oom_score, log);
|
||||
#endif
|
||||
|
||||
global_context->setRemoteHostFilter(config());
|
||||
|
@ -1464,4 +1464,8 @@
|
||||
I don't recommend to change this setting.
|
||||
<show_addresses_in_stack_traces>false</show_addresses_in_stack_traces>
|
||||
-->
|
||||
|
||||
<!-- On Linux systems this can control the behavior of OOM killer.
|
||||
<oom_score>-1000</oom_score>
|
||||
-->
|
||||
</clickhouse>
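The commented-out `oom_score` setting above is consumed by the new `setOOMScore` helper added to Server.cpp, which writes the value to `/proc/self/oom_score_adj`. A quick, hedged way to confirm the adjustment on a running server (assuming the process is named `clickhouse-server`):

```sh
# -1000 effectively exempts the process from the OOM killer; positive values make it a preferred victim.
cat /proc/"$(pidof clickhouse-server)"/oom_score_adj
```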
|
||||
|
@ -35,15 +35,7 @@ add_custom_command(OUTPUT ${ffi_binding_final_path}
|
||||
DEPENDS cargo-build__ch_rust_skim_rust)
|
||||
|
||||
add_library(_ch_rust_skim_ffi ${ffi_binding_final_path})
|
||||
if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
|
||||
# static
|
||||
else()
|
||||
if (OS_DARWIN)
|
||||
target_link_libraries(_ch_rust_skim_ffi PRIVATE -Wl,-undefined,dynamic_lookup)
|
||||
else()
|
||||
target_link_libraries(_ch_rust_skim_ffi PRIVATE -Wl,--unresolved-symbols=ignore-all)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# cxx bridge compiles such bindings
|
||||
set_target_properties(_ch_rust_skim_ffi PROPERTIES COMPILE_FLAGS "${CXXBRIDGE_CXXFLAGS}")
|
||||
|
||||
|
@ -25,8 +25,6 @@
|
||||
#include <IO/Operators.h>
|
||||
#include <Poco/AccessExpireCache.h>
|
||||
#include <boost/algorithm/string/join.hpp>
|
||||
#include <boost/algorithm/string/split.hpp>
|
||||
#include <boost/algorithm/string/trim.hpp>
|
||||
#include <re2/re2.h>
|
||||
#include <filesystem>
|
||||
#include <mutex>
|
||||
@ -695,14 +693,7 @@ std::shared_ptr<const ContextAccess> AccessControl::getContextAccess(
|
||||
|
||||
/// Extract the last entry from comma separated list of X-Forwarded-For addresses.
|
||||
/// Only the last proxy can be trusted (if any).
|
||||
Strings forwarded_addresses;
|
||||
boost::split(forwarded_addresses, client_info.forwarded_for, boost::is_any_of(","));
|
||||
if (!forwarded_addresses.empty())
|
||||
{
|
||||
String & last_forwarded_address = forwarded_addresses.back();
|
||||
boost::trim(last_forwarded_address);
|
||||
params.forwarded_address = last_forwarded_address;
|
||||
}
|
||||
params.forwarded_address = client_info.getLastForwardedFor();
|
||||
|
||||
return getContextAccess(params);
|
||||
}
|
||||
|
@ -11,11 +11,7 @@ if(COMPILER_PIPE)
|
||||
else()
|
||||
set(MAX_COMPILER_MEMORY 1500)
|
||||
endif()
|
||||
if(USE_STATIC_LIBRARIES)
|
||||
set(MAX_LINKER_MEMORY 3500)
|
||||
else()
|
||||
set(MAX_LINKER_MEMORY 2500)
|
||||
endif()
|
||||
set(MAX_LINKER_MEMORY 3500)
|
||||
include(../cmake/limit_jobs.cmake)
|
||||
|
||||
include (../cmake/version.cmake)
|
||||
@ -200,10 +196,6 @@ endif ()
|
||||
|
||||
add_library(clickhouse_common_io ${clickhouse_common_io_headers} ${clickhouse_common_io_sources})
|
||||
|
||||
if (SPLIT_SHARED_LIBRARIES)
|
||||
target_compile_definitions(clickhouse_common_io PRIVATE SPLIT_SHARED_LIBRARIES)
|
||||
endif ()
|
||||
|
||||
add_library (clickhouse_malloc OBJECT Common/malloc.cpp)
|
||||
set_source_files_properties(Common/malloc.cpp PROPERTIES COMPILE_FLAGS "-fno-builtin")
|
||||
|
||||
@ -227,18 +219,7 @@ add_subdirectory(Common/Config)
|
||||
|
||||
set (all_modules)
|
||||
macro(add_object_library name common_path)
|
||||
if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
|
||||
add_headers_and_sources(dbms ${common_path})
|
||||
else ()
|
||||
list (APPEND all_modules ${name})
|
||||
add_headers_and_sources(${name} ${common_path})
|
||||
add_library(${name} SHARED ${${name}_sources} ${${name}_headers})
|
||||
if (OS_DARWIN)
|
||||
target_link_libraries (${name} PRIVATE -Wl,-undefined,dynamic_lookup)
|
||||
else()
|
||||
target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all)
|
||||
endif()
|
||||
endif ()
|
||||
add_headers_and_sources(dbms ${common_path})
|
||||
endmacro()
|
||||
|
||||
add_object_library(clickhouse_access Access)
|
||||
@ -297,28 +278,12 @@ if (TARGET ch_contrib::nuraft)
|
||||
add_object_library(clickhouse_coordination Coordination)
|
||||
endif()
|
||||
|
||||
if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
|
||||
add_library (dbms STATIC ${dbms_headers} ${dbms_sources})
|
||||
target_link_libraries (dbms PRIVATE ch_contrib::libdivide)
|
||||
if (TARGET ch_contrib::jemalloc)
|
||||
target_link_libraries (dbms PRIVATE ch_contrib::jemalloc)
|
||||
endif()
|
||||
set (all_modules dbms)
|
||||
else()
|
||||
add_library (dbms SHARED ${dbms_headers} ${dbms_sources})
|
||||
target_link_libraries (dbms PUBLIC ${all_modules})
|
||||
target_link_libraries (clickhouse_interpreters PRIVATE ch_contrib::libdivide)
|
||||
if (TARGET ch_contrib::jemalloc)
|
||||
target_link_libraries (clickhouse_interpreters PRIVATE ch_contrib::jemalloc)
|
||||
endif()
|
||||
list (APPEND all_modules dbms)
|
||||
# force all split libs to be linked
|
||||
if (OS_DARWIN)
|
||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-undefined,error")
|
||||
else()
|
||||
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed")
|
||||
endif()
|
||||
endif ()
|
||||
add_library (dbms STATIC ${dbms_headers} ${dbms_sources})
|
||||
target_link_libraries (dbms PRIVATE ch_contrib::libdivide)
|
||||
if (TARGET ch_contrib::jemalloc)
|
||||
target_link_libraries (dbms PRIVATE ch_contrib::jemalloc)
|
||||
endif()
|
||||
set (all_modules dbms)
|
||||
|
||||
macro (dbms_target_include_directories)
|
||||
foreach (module ${all_modules})
|
||||
@ -615,13 +580,7 @@ if (ENABLE_TESTS)
|
||||
|
||||
# gtest framework has substandard code
|
||||
target_compile_options(unit_tests_dbms PRIVATE
|
||||
-Wno-zero-as-null-pointer-constant
|
||||
-Wno-covered-switch-default
|
||||
-Wno-undef
|
||||
-Wno-sign-compare
|
||||
-Wno-used-but-marked-unused
|
||||
-Wno-missing-noreturn
|
||||
-Wno-gnu-zero-variadic-macro-arguments
|
||||
)
|
||||
|
||||
target_link_libraries(unit_tests_dbms PRIVATE
|
||||
|
@ -109,8 +109,7 @@ public:
|
||||
template <typename... Args>
|
||||
[[nodiscard]] bool emplace(Args &&... args)
|
||||
{
|
||||
emplaceImpl(std::nullopt /* timeout in milliseconds */, std::forward<Args...>(args...));
|
||||
return true;
|
||||
return emplaceImpl(std::nullopt /* timeout in milliseconds */, std::forward<Args...>(args...));
|
||||
}
|
||||
|
||||
/// Returns false if queue is finished and empty
|
||||
|
@ -48,6 +48,30 @@ struct ClearableHashTableCell : public BaseCell
|
||||
ClearableHashTableCell(const Key & key_, const State & state) : BaseCell(key_, state), version(state.version) {}
|
||||
};
|
||||
|
||||
using StringRefBaseCell = HashSetCellWithSavedHash<StringRef, DefaultHash<StringRef>, ClearableHashSetState>;
|
||||
|
||||
/// specialization for StringRef to allow zero size key (empty string)
|
||||
template <>
|
||||
struct ClearableHashTableCell<StringRef, StringRefBaseCell> : public StringRefBaseCell
|
||||
{
|
||||
using State = ClearableHashSetState;
|
||||
using value_type = typename StringRefBaseCell::value_type;
|
||||
|
||||
UInt32 version;
|
||||
|
||||
bool isZero(const State & state) const { return version != state.version; }
|
||||
static bool isZero(const StringRef & key_, const State & state_) { return StringRefBaseCell::isZero(key_, state_); }
|
||||
|
||||
/// Set the key value to zero.
|
||||
void setZero() { version = 0; }
|
||||
|
||||
/// Do I need to store the zero key separately (that is, can a zero key be inserted into the hash table).
|
||||
static constexpr bool need_zero_value_storage = true;
|
||||
|
||||
ClearableHashTableCell() { } //-V730 /// NOLINT
|
||||
ClearableHashTableCell(const StringRef & key_, const State & state) : StringRefBaseCell(key_, state), version(state.version) { }
|
||||
};
|
||||
|
||||
template <
|
||||
typename Key,
|
||||
typename Hash = DefaultHash<Key>,
|
||||
|
@ -352,7 +352,8 @@ time_t getModificationTime(const std::string & path)
|
||||
struct stat st;
|
||||
if (stat(path.c_str(), &st) == 0)
|
||||
return st.st_mtime;
|
||||
DB::throwFromErrnoWithPath("Cannot check modification time for file: " + path, path, DB::ErrorCodes::CANNOT_STAT);
|
||||
std::error_code m_ec(errno, std::generic_category());
|
||||
throw fs::filesystem_error("Cannot check modification time for file", path, m_ec);
|
||||
}
|
||||
|
||||
time_t getChangeTime(const std::string & path)
|
||||
@ -360,7 +361,8 @@ time_t getChangeTime(const std::string & path)
|
||||
struct stat st;
|
||||
if (stat(path.c_str(), &st) == 0)
|
||||
return st.st_ctime;
|
||||
DB::throwFromErrnoWithPath("Cannot check change time for file: " + path, path, DB::ErrorCodes::CANNOT_STAT);
|
||||
std::error_code m_ec(errno, std::generic_category());
|
||||
throw fs::filesystem_error("Cannot check change time for file", path, m_ec);
|
||||
}
|
||||
|
||||
Poco::Timestamp getModificationTimestamp(const std::string & path)
|
||||
|
@ -1,6 +1,6 @@
|
||||
#include "remapExecutable.h"
|
||||
|
||||
#if defined(OS_LINUX) && defined(__amd64__) && defined(__SSE2__) && !defined(SANITIZER) && defined(NDEBUG) && !defined(SPLIT_SHARED_LIBRARIES)
|
||||
#if defined(OS_LINUX) && defined(__amd64__) && defined(__SSE2__) && !defined(SANITIZER) && defined(NDEBUG)
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
@ -1203,7 +1203,7 @@ auto DDperformanceTestSequence()
|
||||
+ generateSeq<ValueType>(G(SameValueGenerator(42)), 0, times); // best
|
||||
}
|
||||
|
||||
// prime numbers in ascending order with some random repitions hit all the cases of Gorilla.
|
||||
// prime numbers in ascending order with some random repetitions hit all the cases of Gorilla.
|
||||
auto PrimesWithMultiplierGenerator = [](int multiplier = 1)
|
||||
{
|
||||
return [multiplier](auto i)
|
||||
|
@ -296,7 +296,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
|
||||
return true;
|
||||
}
|
||||
|
||||
void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async)
|
||||
void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async, const MultiVersion<Macros>::Version & macros)
|
||||
{
|
||||
LOG_DEBUG(log, "Initializing storage dispatcher");
|
||||
|
||||
@ -307,7 +307,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
|
||||
responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
|
||||
snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });
|
||||
|
||||
snapshot_s3.startup(config);
|
||||
snapshot_s3.startup(config, macros);
|
||||
|
||||
server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue, snapshot_s3);
|
||||
|
||||
@ -687,7 +687,7 @@ bool KeeperDispatcher::isServerActive() const
|
||||
return checkInit() && hasLeader() && !server->isRecovering();
|
||||
}
|
||||
|
||||
void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config)
|
||||
void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
|
||||
{
|
||||
auto diff = server->getConfigurationDiff(config);
|
||||
if (diff.empty())
|
||||
@ -704,7 +704,7 @@ void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfigurati
|
||||
throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue");
|
||||
}
|
||||
|
||||
snapshot_s3.updateS3Configuration(config);
|
||||
snapshot_s3.updateS3Configuration(config, macros);
|
||||
}
|
||||
|
||||
void KeeperDispatcher::updateKeeperStatLatency(uint64_t process_time_ms)
|
||||
|
@ -15,6 +15,8 @@
|
||||
#include <Coordination/Keeper4LWInfo.h>
|
||||
#include <Coordination/KeeperConnectionStats.h>
|
||||
#include <Coordination/KeeperSnapshotManagerS3.h>
|
||||
#include <Common/MultiVersion.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -109,7 +111,8 @@ public:
|
||||
|
||||
/// Initialization from config.
|
||||
/// standalone_keeper -- we are standalone keeper application (not inside clickhouse server)
|
||||
void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async);
|
||||
/// 'macros' are used to substitute macros in endpoint of disks
|
||||
void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async, const MultiVersion<Macros>::Version & macros);
|
||||
|
||||
void startServer();
|
||||
|
||||
@ -124,7 +127,8 @@ public:
|
||||
|
||||
/// Registered in ConfigReloader callback. Add new configuration changes to
|
||||
/// update_configuration_queue. Keeper Dispatcher apply them asynchronously.
|
||||
void updateConfiguration(const Poco::Util::AbstractConfiguration & config);
|
||||
/// 'macros' are used to substitute macros in endpoint of disks
|
||||
void updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
|
||||
|
||||
/// Shutdown internal keeper parts (server, state machine, log storage, etc)
|
||||
void shutdown();
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <IO/S3/PocoHTTPClient.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <IO/copyData.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
#include <aws/core/auth/AWSCredentials.h>
|
||||
#include <aws/s3/S3Client.h>
|
||||
@ -47,7 +48,7 @@ KeeperSnapshotManagerS3::KeeperSnapshotManagerS3()
|
||||
, uuid(UUIDHelpers::generateV4())
|
||||
{}
|
||||
|
||||
void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config)
|
||||
void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
|
||||
{
|
||||
try
|
||||
{
|
||||
@ -64,7 +65,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
|
||||
|
||||
auto auth_settings = S3::AuthSettings::loadFromConfig(config_prefix, config);
|
||||
|
||||
auto endpoint = config.getString(config_prefix + ".endpoint");
|
||||
String endpoint = macros->expand(config.getString(config_prefix + ".endpoint"));
|
||||
auto new_uri = S3::URI{endpoint};
|
||||
|
||||
{
|
||||
@ -261,9 +262,9 @@ void KeeperSnapshotManagerS3::uploadSnapshot(const std::string & path, bool asyn
|
||||
uploadSnapshotImpl(path);
|
||||
}
|
||||
|
||||
void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config)
|
||||
void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
|
||||
{
|
||||
updateS3Configuration(config);
|
||||
updateS3Configuration(config, macros);
|
||||
snapshot_s3_thread = ThreadFromGlobalPool([this] { snapshotS3Thread(); });
|
||||
}
|
||||
|
||||
|
@ -3,6 +3,8 @@
|
||||
#include "config.h"
|
||||
|
||||
#include <Poco/Util/AbstractConfiguration.h>
|
||||
#include <Common/MultiVersion.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
#if USE_AWS_S3
|
||||
#include <Common/ConcurrentBoundedQueue.h>
|
||||
@ -21,10 +23,12 @@ class KeeperSnapshotManagerS3
|
||||
public:
|
||||
KeeperSnapshotManagerS3();
|
||||
|
||||
void updateS3Configuration(const Poco::Util::AbstractConfiguration & config);
|
||||
/// 'macros' are used to substitute macros in endpoint of disks
|
||||
void updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
|
||||
void uploadSnapshot(const std::string & path, bool async_upload = true);
|
||||
|
||||
void startup(const Poco::Util::AbstractConfiguration & config);
|
||||
/// 'macros' are used to substitute macros in endpoint of disks
|
||||
void startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
|
||||
void shutdown();
|
||||
private:
|
||||
using SnapshotS3Queue = ConcurrentBoundedQueue<std::string>;
|
||||
@ -56,10 +60,10 @@ class KeeperSnapshotManagerS3
|
||||
public:
|
||||
KeeperSnapshotManagerS3() = default;
|
||||
|
||||
void updateS3Configuration(const Poco::Util::AbstractConfiguration &) {}
|
||||
void updateS3Configuration(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
|
||||
void uploadSnapshot(const std::string &, [[maybe_unused]] bool async_upload = true) {}
|
||||
|
||||
void startup(const Poco::Util::AbstractConfiguration &) {}
|
||||
void startup(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
|
||||
|
||||
void shutdown() {}
|
||||
};
|
||||
|
@ -36,7 +36,12 @@ void TinyContext::initializeKeeperDispatcher([[maybe_unused]] bool start_async)
|
||||
if (config_ref.has("keeper_server"))
|
||||
{
|
||||
keeper_dispatcher = std::make_shared<KeeperDispatcher>();
|
||||
keeper_dispatcher->initialize(config_ref, true, start_async);
|
||||
|
||||
MultiVersion<Macros>::Version macros;
|
||||
|
||||
if (config_ref.has("macros"))
|
||||
macros = std::make_unique<Macros>(config_ref, "macros", &Poco::Logger::get("TinyContext"));
|
||||
keeper_dispatcher->initialize(config_ref, true, start_async, macros);
|
||||
}
|
||||
}
|
||||
|
||||
@ -71,7 +76,12 @@ void TinyContext::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::A
|
||||
if (!keeper_dispatcher)
|
||||
return;
|
||||
|
||||
keeper_dispatcher->updateConfiguration(config_);
|
||||
MultiVersion<Macros>::Version macros;
|
||||
|
||||
if (config_.has("macros"))
|
||||
macros = std::make_unique<Macros>(config_, "macros", &Poco::Logger::get("TinyContext"));
|
||||
|
||||
keeper_dispatcher->updateConfiguration(config_, macros);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -602,34 +602,6 @@ void BaseDaemon::closeFDs()
|
||||
}
|
||||
}
|
||||
|
||||
namespace
|
||||
{
|
||||
/// In debug version on Linux, increase oom score so that clickhouse is killed
|
||||
/// first, instead of some service. Use a carefully chosen random score of 555:
|
||||
/// the maximum is 1000, and chromium uses 300 for its tab processes. Ignore
|
||||
/// whatever errors that occur, because it's just a debugging aid and we don't
|
||||
/// care if it breaks.
|
||||
#if defined(OS_LINUX) && !defined(NDEBUG)
|
||||
void debugIncreaseOOMScore()
|
||||
{
|
||||
const std::string new_score = "555";
|
||||
try
|
||||
{
|
||||
DB::WriteBufferFromFile buf("/proc/self/oom_score_adj");
|
||||
buf.write(new_score.c_str(), new_score.size());
|
||||
buf.close();
|
||||
}
|
||||
catch (const Poco::Exception & e)
|
||||
{
|
||||
LOG_WARNING(&Poco::Logger::root(), "Failed to adjust OOM score: '{}'.", e.displayText());
|
||||
return;
|
||||
}
|
||||
LOG_INFO(&Poco::Logger::root(), "Set OOM score adjustment to {}", new_score);
|
||||
}
|
||||
#else
|
||||
void debugIncreaseOOMScore() {}
|
||||
#endif
|
||||
}
|
||||
|
||||
void BaseDaemon::initialize(Application & self)
|
||||
{
|
||||
@ -796,7 +768,6 @@ void BaseDaemon::initialize(Application & self)
|
||||
|
||||
initializeTerminationAndSignalProcessing();
|
||||
logRevision();
|
||||
debugIncreaseOOMScore();
|
||||
|
||||
for (const auto & key : DB::getMultipleKeysFromConfig(config(), "", "graphite"))
|
||||
{
|
||||
|
@ -7,10 +7,6 @@ add_library (daemon
|
||||
GitHash.generated.cpp
|
||||
)
|
||||
|
||||
if (OS_DARWIN AND NOT USE_STATIC_LIBRARIES)
|
||||
target_link_libraries (daemon PUBLIC -Wl,-undefined,dynamic_lookup)
|
||||
endif()
|
||||
|
||||
target_link_libraries (daemon PUBLIC loggers common PRIVATE clickhouse_parsers clickhouse_common_io clickhouse_common_config)
|
||||
|
||||
if (TARGET ch_contrib::sentry)
|
||||
|
@ -5,6 +5,7 @@
|
||||
#include <Common/Exception.h>
|
||||
|
||||
#define MAX_FIXEDSTRING_SIZE 0xFFFFFF
|
||||
#define MAX_FIXEDSTRING_SIZE_WITHOUT_SUSPICIOUS 256
|
||||
|
||||
|
||||
namespace DB
|
||||
|
@ -539,11 +539,19 @@ String DatabaseOnDisk::getObjectMetadataPath(const String & object_name) const
|
||||
time_t DatabaseOnDisk::getObjectMetadataModificationTime(const String & object_name) const
|
||||
{
|
||||
String table_metadata_path = getObjectMetadataPath(object_name);
|
||||
|
||||
if (fs::exists(table_metadata_path))
|
||||
try
|
||||
{
|
||||
return FS::getModificationTime(table_metadata_path);
|
||||
else
|
||||
return static_cast<time_t>(0);
|
||||
}
|
||||
catch (const fs::filesystem_error & e)
|
||||
{
|
||||
if (e.code() == std::errc::no_such_file_or_directory)
|
||||
{
|
||||
return static_cast<time_t>(0);
|
||||
}
|
||||
else
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
void DatabaseOnDisk::iterateMetadataFiles(ContextPtr local_context, const IteratingFunction & process_metadata_file) const
|
||||
|
@ -1,36 +1,39 @@
|
||||
#include <DataTypes/DataTypeString.h>
|
||||
#include <Databases/DatabaseReplicated.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/executeQuery.h>
|
||||
#include <Parsers/queryToString.h>
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include <Backups/IRestoreCoordination.h>
|
||||
#include <Backups/RestorerFromBackup.h>
|
||||
#include <base/chrono_io.h>
|
||||
#include <base/getFQDNOrHostName.h>
|
||||
#include <Common/Exception.h>
|
||||
#include <Common/Macros.h>
|
||||
#include <Common/OpenTelemetryTraceContext.h>
|
||||
#include <Common/ZooKeeper/KeeperException.h>
|
||||
#include <Common/ZooKeeper/Types.h>
|
||||
#include <Common/ZooKeeper/ZooKeeper.h>
|
||||
#include <Databases/DatabaseReplicated.h>
|
||||
#include <Databases/DatabaseReplicatedWorker.h>
|
||||
#include <Interpreters/DDLTask.h>
|
||||
#include <Interpreters/executeDDLQueryOnCluster.h>
|
||||
#include <Databases/DDLDependencyVisitor.h>
|
||||
#include <Databases/TablesDependencyGraph.h>
|
||||
#include <Interpreters/Cluster.h>
|
||||
#include <base/getFQDNOrHostName.h>
|
||||
#include <Interpreters/Context.h>
|
||||
#include <Interpreters/DDLTask.h>
|
||||
#include <Interpreters/evaluateConstantExpression.h>
|
||||
#include <Interpreters/executeDDLQueryOnCluster.h>
|
||||
#include <Interpreters/executeQuery.h>
|
||||
#include <Interpreters/InterpreterCreateQuery.h>
|
||||
#include <IO/ReadBufferFromFile.h>
|
||||
#include <IO/ReadBufferFromString.h>
|
||||
#include <IO/ReadHelpers.h>
|
||||
#include <IO/WriteHelpers.h>
|
||||
#include <Parsers/ASTAlterQuery.h>
|
||||
#include <Parsers/ASTDropQuery.h>
|
||||
#include <Parsers/ASTFunction.h>
|
||||
#include <Parsers/ParserCreateQuery.h>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Interpreters/InterpreterCreateQuery.h>
|
||||
#include <Interpreters/evaluateConstantExpression.h>
|
||||
#include <Parsers/formatAST.h>
|
||||
#include <Backups/IRestoreCoordination.h>
|
||||
#include <Backups/RestorerFromBackup.h>
|
||||
#include <Common/Macros.h>
|
||||
#include <base/chrono_io.h>
|
||||
|
||||
#include <utility>
|
||||
#include <Parsers/parseQuery.h>
|
||||
#include <Parsers/ParserCreateQuery.h>
|
||||
#include <Parsers/queryToString.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -905,31 +908,37 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
|
||||
for (const auto & id : dropped_tables)
|
||||
DatabaseCatalog::instance().waitTableFinallyDropped(id);
|
||||
|
||||
/// FIXME: Use proper dependency calculation instead of just moving MV to the end
|
||||
using NameToMetadata = std::pair<String, String>;
|
||||
std::vector<NameToMetadata> table_name_to_metadata_sorted;
|
||||
table_name_to_metadata_sorted.reserve(table_name_to_metadata.size());
|
||||
std::move(table_name_to_metadata.begin(), table_name_to_metadata.end(), std::back_inserter(table_name_to_metadata_sorted));
|
||||
std::sort(table_name_to_metadata_sorted.begin(), table_name_to_metadata_sorted.end(), [](const NameToMetadata & lhs, const NameToMetadata & rhs) -> bool
|
||||
{
|
||||
const bool is_materialized_view_lhs = lhs.second.find("MATERIALIZED VIEW") != std::string::npos;
|
||||
const bool is_materialized_view_rhs = rhs.second.find("MATERIALIZED VIEW") != std::string::npos;
|
||||
return is_materialized_view_lhs < is_materialized_view_rhs;
|
||||
});
|
||||
|
||||
for (const auto & name_and_meta : table_name_to_metadata_sorted)
|
||||
/// Create all needed tables in a proper order
|
||||
TablesDependencyGraph tables_dependencies("DatabaseReplicated (" + getDatabaseName() + ")");
|
||||
for (const auto & [table_name, create_table_query] : table_name_to_metadata)
|
||||
{
|
||||
if (isTableExist(name_and_meta.first, getContext()))
|
||||
/// Note that table_name could contain a dot inside (e.g. .inner.1234-1234-1234-1234)
|
||||
/// And QualifiedTableName::parseFromString doesn't handle this.
|
||||
auto qualified_name = QualifiedTableName{.database = getDatabaseName(), .table = table_name};
|
||||
auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_table_query);
|
||||
tables_dependencies.addDependencies(qualified_name, getDependenciesFromCreateQuery(getContext(), qualified_name, query_ast));
|
||||
}
|
||||
|
||||
tables_dependencies.checkNoCyclicDependencies();
|
||||
auto tables_to_create = tables_dependencies.getTablesSortedByDependency();
|
||||
|
||||
for (const auto & table_id : tables_to_create)
|
||||
{
|
||||
auto table_name = table_id.getTableName();
|
||||
auto create_query_string = table_name_to_metadata[table_name];
|
||||
if (isTableExist(table_name, getContext()))
|
||||
{
|
||||
assert(name_and_meta.second == readMetadataFile(name_and_meta.first));
|
||||
assert(create_query_string == readMetadataFile(table_name));
|
||||
continue;
|
||||
}
|
||||
|
||||
auto query_ast = parseQueryFromMetadataInZooKeeper(name_and_meta.first, name_and_meta.second);
|
||||
auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_query_string);
|
||||
LOG_INFO(log, "Executing {}", serializeAST(*query_ast));
|
||||
auto create_query_context = make_query_context();
|
||||
InterpreterCreateQuery(query_ast, create_query_context).execute();
|
||||
}
|
||||
LOG_INFO(log, "All tables are created successfully");
|
||||
|
||||
if (max_log_ptr_at_creation != 0)
|
||||
{
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <Functions/FunctionHelpers.h>
|
||||
#include <Interpreters/castColumn.h>
|
||||
|
||||
#include <Dictionaries/DictionaryFactory.h>
|
||||
#include <Dictionaries/DictionarySource.h>
|
||||
|
||||
|
||||
@ -1017,91 +1016,7 @@ Pipe RangeHashedDictionary<dictionary_key_type>::read(const Names & column_names
|
||||
return result;
|
||||
}
|
||||
|
||||
template <DictionaryKeyType dictionary_key_type>
|
||||
static DictionaryPtr createRangeHashedDictionary(const std::string & full_name,
|
||||
const DictionaryStructure & dict_struct,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_prefix,
|
||||
DictionarySourcePtr source_ptr)
|
||||
{
|
||||
static constexpr auto layout_name = dictionary_key_type == DictionaryKeyType::Simple ? "range_hashed" : "complex_key_range_hashed";
|
||||
|
||||
if constexpr (dictionary_key_type == DictionaryKeyType::Simple)
|
||||
{
|
||||
if (dict_struct.key)
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is not supported for dictionary of layout 'range_hashed'");
|
||||
}
|
||||
else
|
||||
{
|
||||
if (dict_struct.id)
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is not supported for dictionary of layout 'complex_key_range_hashed'");
|
||||
}
|
||||
|
||||
if (!dict_struct.range_min || !dict_struct.range_max)
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"{}: dictionary of layout '{}' requires .structure.range_min and .structure.range_max",
|
||||
full_name,
|
||||
layout_name);
|
||||
|
||||
const auto dict_id = StorageID::fromDictionaryConfig(config, config_prefix);
|
||||
const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"};
|
||||
const bool require_nonempty = config.getBool(config_prefix + ".require_nonempty", false);
|
||||
|
||||
String dictionary_layout_prefix = config_prefix + ".layout." + layout_name;
|
||||
const bool convert_null_range_bound_to_open = config.getBool(dictionary_layout_prefix + ".convert_null_range_bound_to_open", true);
|
||||
String range_lookup_strategy = config.getString(dictionary_layout_prefix + ".range_lookup_strategy", "min");
|
||||
RangeHashedDictionaryLookupStrategy lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
|
||||
|
||||
if (range_lookup_strategy == "min")
|
||||
lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
|
||||
else if (range_lookup_strategy == "max")
|
||||
lookup_strategy = RangeHashedDictionaryLookupStrategy::max;
|
||||
|
||||
RangeHashedDictionaryConfiguration configuration
|
||||
{
|
||||
.convert_null_range_bound_to_open = convert_null_range_bound_to_open,
|
||||
.lookup_strategy = lookup_strategy,
|
||||
.require_nonempty = require_nonempty
|
||||
};
|
||||
|
||||
DictionaryPtr result = std::make_unique<RangeHashedDictionary<dictionary_key_type>>(
|
||||
dict_id,
|
||||
dict_struct,
|
||||
std::move(source_ptr),
|
||||
dict_lifetime,
|
||||
configuration);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void registerDictionaryRangeHashed(DictionaryFactory & factory)
|
||||
{
|
||||
auto create_layout_simple = [=](const std::string & full_name,
|
||||
const DictionaryStructure & dict_struct,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_prefix,
|
||||
DictionarySourcePtr source_ptr,
|
||||
ContextPtr /* global_context */,
|
||||
bool /*created_from_ddl*/) -> DictionaryPtr
|
||||
{
|
||||
return createRangeHashedDictionary<DictionaryKeyType::Simple>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
|
||||
};
|
||||
|
||||
factory.registerLayout("range_hashed", create_layout_simple, false);
|
||||
|
||||
auto create_layout_complex = [=](const std::string & full_name,
|
||||
const DictionaryStructure & dict_struct,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_prefix,
|
||||
DictionarySourcePtr source_ptr,
|
||||
ContextPtr /* context */,
|
||||
bool /*created_from_ddl*/) -> DictionaryPtr
|
||||
{
|
||||
return createRangeHashedDictionary<DictionaryKeyType::Complex>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
|
||||
};
|
||||
|
||||
factory.registerLayout("complex_key_range_hashed", create_layout_complex, true);
|
||||
}
|
||||
template class RangeHashedDictionary<DictionaryKeyType::Simple>;
|
||||
template class RangeHashedDictionary<DictionaryKeyType::Complex>;
|
||||
|
||||
}
|
||||
|
@ -248,4 +248,7 @@ private:
|
||||
Arena string_arena;
|
||||
};
|
||||
|
||||
extern template class RangeHashedDictionary<DictionaryKeyType::Simple>;
|
||||
extern template class RangeHashedDictionary<DictionaryKeyType::Complex>;
|
||||
|
||||
}
|
||||
|
101
src/Dictionaries/registerRangeHashedDictionary.cpp
Normal file
@ -0,0 +1,101 @@
|
||||
#include "RangeHashedDictionary.h"
|
||||
#include <Dictionaries/DictionarySource.h>
|
||||
#include <Dictionaries/DictionaryFactory.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int UNSUPPORTED_METHOD;
|
||||
extern const int BAD_ARGUMENTS;
|
||||
}
|
||||
|
||||
template <DictionaryKeyType dictionary_key_type>
|
||||
static DictionaryPtr createRangeHashedDictionary(const std::string & full_name,
|
||||
const DictionaryStructure & dict_struct,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_prefix,
|
||||
DictionarySourcePtr source_ptr)
|
||||
{
|
||||
static constexpr auto layout_name = dictionary_key_type == DictionaryKeyType::Simple ? "range_hashed" : "complex_key_range_hashed";
|
||||
|
||||
if constexpr (dictionary_key_type == DictionaryKeyType::Simple)
|
||||
{
|
||||
if (dict_struct.key)
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'key' is not supported for dictionary of layout 'range_hashed'");
|
||||
}
|
||||
else
|
||||
{
|
||||
if (dict_struct.id)
|
||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "'id' is not supported for dictionary of layout 'complex_key_range_hashed'");
|
||||
}
|
||||
|
||||
if (!dict_struct.range_min || !dict_struct.range_max)
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"{}: dictionary of layout '{}' requires .structure.range_min and .structure.range_max",
|
||||
full_name,
|
||||
layout_name);
|
||||
|
||||
const auto dict_id = StorageID::fromDictionaryConfig(config, config_prefix);
|
||||
const DictionaryLifetime dict_lifetime{config, config_prefix + ".lifetime"};
|
||||
const bool require_nonempty = config.getBool(config_prefix + ".require_nonempty", false);
|
||||
|
||||
String dictionary_layout_prefix = config_prefix + ".layout." + layout_name;
|
||||
const bool convert_null_range_bound_to_open = config.getBool(dictionary_layout_prefix + ".convert_null_range_bound_to_open", true);
|
||||
String range_lookup_strategy = config.getString(dictionary_layout_prefix + ".range_lookup_strategy", "min");
|
||||
RangeHashedDictionaryLookupStrategy lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
|
||||
|
||||
if (range_lookup_strategy == "min")
|
||||
lookup_strategy = RangeHashedDictionaryLookupStrategy::min;
|
||||
else if (range_lookup_strategy == "max")
|
||||
lookup_strategy = RangeHashedDictionaryLookupStrategy::max;
|
||||
|
||||
RangeHashedDictionaryConfiguration configuration
|
||||
{
|
||||
.convert_null_range_bound_to_open = convert_null_range_bound_to_open,
|
||||
.lookup_strategy = lookup_strategy,
|
||||
.require_nonempty = require_nonempty
|
||||
};
|
||||
|
||||
DictionaryPtr result = std::make_unique<RangeHashedDictionary<dictionary_key_type>>(
|
||||
dict_id,
|
||||
dict_struct,
|
||||
std::move(source_ptr),
|
||||
dict_lifetime,
|
||||
configuration);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void registerDictionaryRangeHashed(DictionaryFactory & factory)
|
||||
{
|
||||
auto create_layout_simple = [=](const std::string & full_name,
|
||||
const DictionaryStructure & dict_struct,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_prefix,
|
||||
DictionarySourcePtr source_ptr,
|
||||
ContextPtr /* global_context */,
|
||||
bool /*created_from_ddl*/) -> DictionaryPtr
|
||||
{
|
||||
return createRangeHashedDictionary<DictionaryKeyType::Simple>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
|
||||
};
|
||||
|
||||
factory.registerLayout("range_hashed", create_layout_simple, false);
|
||||
|
||||
auto create_layout_complex = [=](const std::string & full_name,
|
||||
const DictionaryStructure & dict_struct,
|
||||
const Poco::Util::AbstractConfiguration & config,
|
||||
const std::string & config_prefix,
|
||||
DictionarySourcePtr source_ptr,
|
||||
ContextPtr /* context */,
|
||||
bool /*created_from_ddl*/) -> DictionaryPtr
|
||||
{
|
||||
return createRangeHashedDictionary<DictionaryKeyType::Complex>(full_name, dict_struct, config, config_prefix, std::move(source_ptr));
|
||||
};
|
||||
|
||||
factory.registerLayout("complex_key_range_hashed", create_layout_complex, true);
|
||||
}
|
||||
|
||||
}
|
@ -4,6 +4,7 @@
|
||||
#include <Disks/ObjectStorages/MetadataStorageFromDisk.h>
|
||||
#include <Disks/DiskFactory.h>
|
||||
#include <Storages/HDFS/HDFSCommon.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -22,7 +23,8 @@ void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check)
|
||||
ContextPtr context,
|
||||
const DisksMap & /*map*/) -> DiskPtr
|
||||
{
|
||||
String uri{config.getString(config_prefix + ".endpoint")};
|
||||
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
|
||||
String uri{endpoint};
|
||||
checkHDFSURL(uri);
|
||||
|
||||
if (uri.back() != '/')
|
||||
|
@ -1,5 +1,7 @@
|
||||
#include <Disks/ObjectStorages/S3/S3ObjectStorage.h>
|
||||
#include <Common/ProfileEvents.h>
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
|
||||
#if USE_AWS_S3
|
||||
|
||||
@ -31,6 +33,7 @@
|
||||
#include <Common/StringUtils/StringUtils.h>
|
||||
#include <Common/logger_useful.h>
|
||||
#include <Common/MultiVersion.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
|
||||
namespace ProfileEvents
|
||||
@ -634,10 +637,11 @@ std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(
|
||||
{
|
||||
auto new_s3_settings = getSettings(config, config_prefix, context);
|
||||
auto new_client = getClient(config, config_prefix, context, *new_s3_settings);
|
||||
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
|
||||
return std::make_unique<S3ObjectStorage>(
|
||||
std::move(new_client), std::move(new_s3_settings),
|
||||
version_id, s3_capabilities, new_namespace,
|
||||
config.getString(config_prefix + ".endpoint"));
|
||||
endpoint);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <Disks/ObjectStorages/S3/ProxyResolverConfiguration.h>
|
||||
#include <Disks/ObjectStorages/DiskObjectStorageCommon.h>
|
||||
#include <Disks/DiskLocal.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -121,7 +122,8 @@ std::unique_ptr<Aws::S3::S3Client> getClient(
|
||||
settings.request_settings.get_request_throttler,
|
||||
settings.request_settings.put_request_throttler);
|
||||
|
||||
S3::URI uri(config.getString(config_prefix + ".endpoint"));
|
||||
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
|
||||
S3::URI uri(endpoint);
|
||||
if (uri.key.back() != '/')
|
||||
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
||||
|
@ -23,6 +23,7 @@
|
||||
|
||||
#include <Storages/StorageS3Settings.h>
|
||||
#include <Core/ServerUUID.h>
|
||||
#include <Common/Macros.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
@ -104,7 +105,8 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
|
||||
ContextPtr context,
|
||||
const DisksMap & /*map*/) -> DiskPtr
|
||||
{
|
||||
S3::URI uri(config.getString(config_prefix + ".endpoint"));
|
||||
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
|
||||
S3::URI uri(endpoint);
|
||||
|
||||
if (uri.key.empty())
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No key in S3 uri: {}", uri.uri.toString());
|
||||
|
@ -5,6 +5,9 @@
|
||||
#include <Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h>
|
||||
#include <Disks/ObjectStorages/DiskObjectStorage.h>
|
||||
#include <Common/assert_cast.h>
|
||||
#include <Common/Macros.h>
|
||||
#include <Interpreters/Context.h>
|
||||
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -23,7 +26,7 @@ void registerDiskWebServer(DiskFactory & factory, bool global_skip_access_check)
|
||||
ContextPtr context,
|
||||
const DisksMap & /*map*/) -> DiskPtr
|
||||
{
|
||||
String uri{config.getString(config_prefix + ".endpoint")};
|
||||
String uri = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
|
||||
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
|
||||
|
||||
if (!uri.ends_with('/'))
|
||||
|
@ -432,7 +432,7 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo
|
||||
settings.json.read_bools_as_numbers,
|
||||
settings.json.read_objects_as_strings,
|
||||
settings.json.read_numbers_as_strings,
|
||||
settings.json.try_infer_objects);
|
||||
settings.json.allow_object_type);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -103,7 +103,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
|
||||
format_settings.json.validate_types_from_metadata = settings.input_format_json_validate_types_from_metadata;
|
||||
format_settings.json.validate_utf8 = settings.output_format_json_validate_utf8;
|
||||
format_settings.json_object_each_row.column_for_object_name = settings.format_json_object_each_row_column_for_object_name;
|
||||
format_settings.json.try_infer_objects = context->getSettingsRef().allow_experimental_object_type;
|
||||
format_settings.json.allow_object_type = context->getSettingsRef().allow_experimental_object_type;
|
||||
format_settings.null_as_default = settings.input_format_null_as_default;
|
||||
format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros;
|
||||
format_settings.parquet.row_group_size = settings.output_format_parquet_row_group_size;
|
||||
|
@ -161,7 +161,7 @@ struct FormatSettings
|
||||
bool try_infer_numbers_from_strings = false;
|
||||
bool validate_types_from_metadata = true;
|
||||
bool validate_utf8 = false;
|
||||
bool try_infer_objects = false;
|
||||
bool allow_object_type = false;
|
||||
} json;
|
||||
|
||||
struct
|
||||
|
@ -366,7 +366,7 @@ namespace
|
||||
transformJSONTuplesAndArraysToArrays(data_types, settings, type_indexes, json_info);
|
||||
|
||||
/// Convert Maps to Objects if needed.
|
||||
if (settings.json.try_infer_objects)
|
||||
if (settings.json.allow_object_type)
|
||||
transformMapsAndObjectsToObjects(data_types, type_indexes);
|
||||
|
||||
if (settings.json.read_objects_as_strings)
|
||||
@ -716,7 +716,7 @@ namespace
|
||||
{
|
||||
if constexpr (is_json)
|
||||
{
|
||||
if (settings.json.try_infer_objects)
|
||||
if (settings.json.allow_object_type)
|
||||
return std::make_shared<DataTypeObject>("json", true);
|
||||
}
|
||||
/// Empty Map is Map(Nothing, Nothing)
|
||||
@ -735,7 +735,7 @@ namespace
|
||||
transformInferredTypesIfNeededImpl<is_json>(value_types, settings, json_info);
|
||||
if (!checkIfTypesAreEqual(value_types))
|
||||
{
|
||||
if (settings.json.try_infer_objects)
|
||||
if (settings.json.allow_object_type)
|
||||
return std::make_shared<DataTypeObject>("json", true);
|
||||
if (settings.json.read_objects_as_strings)
|
||||
return std::make_shared<DataTypeString>();
|
||||
|
@ -112,12 +112,7 @@ endif ()
|
||||
|
||||
target_link_libraries(clickhouse_functions_obj PUBLIC ${PUBLIC_LIBS} PRIVATE ${PRIVATE_LIBS})
|
||||
|
||||
if (USE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
|
||||
# Used to forward the linking information to the final binaries such as clickhouse / unit_tests_dbms,
|
||||
# since such information are lost after we convert to OBJECT target
|
||||
add_library(clickhouse_functions INTERFACE)
|
||||
target_link_libraries(clickhouse_functions INTERFACE ${OBJECT_LIBS} ${PUBLIC_LIBS} ${PRIVATE_LIBS})
|
||||
else()
|
||||
add_library(clickhouse_functions SHARED ${OBJECT_LIBS})
|
||||
target_link_libraries(clickhouse_functions PUBLIC ${PUBLIC_LIBS} PRIVATE ${PRIVATE_LIBS})
|
||||
endif ()
|
||||
# Used to forward the linking information to the final binaries such as clickhouse / unit_tests_dbms,
|
||||
# since such information are lost after we convert to OBJECT target
|
||||
add_library(clickhouse_functions INTERFACE)
|
||||
target_link_libraries(clickhouse_functions INTERFACE ${OBJECT_LIBS} ${PUBLIC_LIBS} ${PRIVATE_LIBS})
|
||||
|
@ -1,5 +1,6 @@
|
||||
#pragma once
|
||||
#include <Functions/FunctionsConversion.h>
|
||||
#include <Interpreters/parseColumnsListForTableFunction.h>
|
||||
|
||||
namespace DB
|
||||
{
|
||||
@ -32,10 +33,11 @@ public:
|
||||
|
||||
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
|
||||
|
||||
explicit CastOverloadResolverImpl(std::optional<Diagnostic> diagnostic_, bool keep_nullable_, bool cast_ipv4_ipv6_default_on_conversion_error_)
|
||||
explicit CastOverloadResolverImpl(std::optional<Diagnostic> diagnostic_, bool keep_nullable_, bool cast_ipv4_ipv6_default_on_conversion_error_, const DataTypeValidationSettings & data_type_validation_settings_)
|
||||
: diagnostic(std::move(diagnostic_))
|
||||
, keep_nullable(keep_nullable_)
|
||||
, cast_ipv4_ipv6_default_on_conversion_error(cast_ipv4_ipv6_default_on_conversion_error_)
|
||||
, data_type_validation_settings(data_type_validation_settings_)
|
||||
{
|
||||
}
|
||||
|
||||
@ -46,13 +48,13 @@ public:
|
||||
if constexpr (internal)
|
||||
return createImpl({}, false /*keep_nullable*/, settings_ref.cast_ipv4_ipv6_default_on_conversion_error);
|
||||
|
||||
return createImpl({}, settings_ref.cast_keep_nullable, settings_ref.cast_ipv4_ipv6_default_on_conversion_error);
|
||||
return createImpl({}, settings_ref.cast_keep_nullable, settings_ref.cast_ipv4_ipv6_default_on_conversion_error, DataTypeValidationSettings(settings_ref));
|
||||
}
|
||||
|
||||
static FunctionOverloadResolverPtr createImpl(std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false, bool cast_ipv4_ipv6_default_on_conversion_error = false)
|
||||
static FunctionOverloadResolverPtr createImpl(std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false, bool cast_ipv4_ipv6_default_on_conversion_error = false, const DataTypeValidationSettings & data_type_validation_settings = {})
|
||||
{
|
||||
assert(!internal || !keep_nullable);
|
||||
return std::make_unique<CastOverloadResolverImpl>(std::move(diagnostic), keep_nullable, cast_ipv4_ipv6_default_on_conversion_error);
|
||||
return std::make_unique<CastOverloadResolverImpl>(std::move(diagnostic), keep_nullable, cast_ipv4_ipv6_default_on_conversion_error, data_type_validation_settings);
|
||||
}
|
||||
|
||||
protected:
|
||||
@ -83,6 +85,7 @@ protected:
|
||||
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
|
||||
|
||||
DataTypePtr type = DataTypeFactory::instance().get(type_col->getValue<String>());
|
||||
validateDataType(type, data_type_validation_settings);
|
||||
|
||||
if constexpr (cast_type == CastType::accurateOrNull)
|
||||
return makeNullable(type);
|
||||
@ -104,6 +107,7 @@ private:
|
||||
std::optional<Diagnostic> diagnostic;
|
||||
bool keep_nullable;
|
||||
bool cast_ipv4_ipv6_default_on_conversion_error;
|
||||
DataTypeValidationSettings data_type_validation_settings;
|
||||
};
|
||||
|
||||
|
||||
|
@ -8,16 +8,6 @@ namespace DB
|
||||
|
||||
REGISTER_FUNCTION(Hashing)
|
||||
{
|
||||
#if USE_SSL
|
||||
factory.registerFunction<FunctionMD4>();
|
||||
factory.registerFunction<FunctionHalfMD5>();
|
||||
factory.registerFunction<FunctionMD5>();
|
||||
factory.registerFunction<FunctionSHA1>();
|
||||
factory.registerFunction<FunctionSHA224>();
|
||||
factory.registerFunction<FunctionSHA256>();
|
||||
factory.registerFunction<FunctionSHA384>();
|
||||
factory.registerFunction<FunctionSHA512>();
|
||||
#endif
|
||||
factory.registerFunction<FunctionSipHash64>();
|
||||
factory.registerFunction<FunctionSipHash128>();
|
||||
factory.registerFunction<FunctionCityHash64>();
|
||||
|
27
src/Functions/FunctionsHashingSSL.cpp
Normal file
@ -0,0 +1,27 @@
|
||||
#include "config.h"
|
||||
|
||||
#if USE_SSL
|
||||
|
||||
#include "FunctionsHashing.h"
|
||||
#include <Functions/FunctionFactory.h>
|
||||
|
||||
/// SSL functions are located in the separate FunctionsHashingSSL.cpp file
|
||||
/// to lower the compilation time of FunctionsHashing.cpp
|
||||
|
||||
namespace DB
|
||||
{
|
||||
|
||||
REGISTER_FUNCTION(HashingSSL)
|
||||
{
|
||||
factory.registerFunction<FunctionMD4>();
|
||||
factory.registerFunction<FunctionHalfMD5>();
|
||||
factory.registerFunction<FunctionMD5>();
|
||||
factory.registerFunction<FunctionSHA1>();
|
||||
factory.registerFunction<FunctionSHA224>();
|
||||
factory.registerFunction<FunctionSHA256>();
|
||||
factory.registerFunction<FunctionSHA384>();
|
||||
factory.registerFunction<FunctionSHA512>();
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
@ -205,7 +205,7 @@ public:
|
||||
const String & expression_return_name_)
|
||||
: expression_actions(std::move(expression_actions_))
|
||||
{
|
||||
/// Check that expression does not contain unusual actions that will break columnss structure.
|
||||
/// Check that expression does not contain unusual actions that will break columns structure.
|
||||
for (const auto & action : expression_actions->getActions())
|
||||
if (action.node->type == ActionsDAG::ActionType::ARRAY_JOIN)
|
||||
throw Exception("Expression with arrayJoin or other unusual action cannot be captured", ErrorCodes::BAD_ARGUMENTS);
|
||||
|
Some files were not shown because too many files have changed in this diff.