Merge branch 'master' into less-flaky-test-concurrent-queries

Antonio Andelic, 2023-01-05 12:23:30 +01:00, committed by GitHub
commit 7dd8ac717a
131 changed files with 2447 additions and 557 deletions


@ -18,5 +18,21 @@ tests/ci/run_check.py
### Changelog entry (a user-readable short description of the changes that goes to CHANGELOG.md):
...
### Documentation entry for user-facing changes
<!---
Directly edit documentation source files in the "docs" folder with the same pull-request as code changes
or
Add a user-readable short description of the changes that should be added to docs.clickhouse.com below.
At a minimum, the following information should be added (but add more as needed).
- Motivation: Why is this function, table engine, etc. useful to ClickHouse users?
- Parameters: If the feature being added takes arguments, options or is influenced by settings, please list them below with a brief explanation.
- Example use: A query or command.
-->
> Information about CI checks: https://clickhouse.com/docs/en/development/continuous-integration/


@ -2529,7 +2529,7 @@ jobs:
sudo rm -fr "$TEMP_PATH"
TestsBugfixCheck:
needs: [CheckLabels, StyleCheck]
-runs-on: [self-hosted, stress-tester]
+runs-on: [self-hosted, func-tester]
steps:
- name: Set envs
run: |
@ -2565,7 +2565,7 @@ jobs:
python3 functional_test_check.py "Stateless $CHECK_NAME" "$KILL_TIMEOUT" \
--validate-bugfix --post-commit-status=file || echo 'ignore exit code'
-python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/post_commit_status.tsv" "${TEMP_PATH}/integration/post_commit_status.tsv"
+python3 bugfix_validate_check.py "${TEMP_PATH}/stateless/functional_commit_status.tsv" "${TEMP_PATH}/integration/integration_commit_status.tsv"
- name: Cleanup
if: always()
run: |

.gitmodules vendored (34 changes)

@ -104,13 +104,13 @@
url = https://github.com/ClickHouse/aws-sdk-cpp.git
[submodule "aws-c-event-stream"]
path = contrib/aws-c-event-stream
-url = https://github.com/ClickHouse/aws-c-event-stream.git
+url = https://github.com/awslabs/aws-c-event-stream.git
[submodule "aws-c-common"]
path = contrib/aws-c-common
url = https://github.com/ClickHouse/aws-c-common.git
[submodule "aws-checksums"]
path = contrib/aws-checksums
-url = https://github.com/ClickHouse/aws-checksums.git
+url = https://github.com/awslabs/aws-checksums.git
[submodule "contrib/curl"]
path = contrib/curl
url = https://github.com/curl/curl.git
@ -294,3 +294,33 @@
[submodule "contrib/libdivide"]
path = contrib/libdivide
url = https://github.com/ridiculousfish/libdivide.git
[submodule "contrib/aws-crt-cpp"]
path = contrib/aws-crt-cpp
url = https://github.com/ClickHouse/aws-crt-cpp.git
[submodule "contrib/aws-c-io"]
path = contrib/aws-c-io
url = https://github.com/ClickHouse/aws-c-io.git
[submodule "contrib/aws-c-mqtt"]
path = contrib/aws-c-mqtt
url = https://github.com/awslabs/aws-c-mqtt.git
[submodule "contrib/aws-c-auth"]
path = contrib/aws-c-auth
url = https://github.com/awslabs/aws-c-auth.git
[submodule "contrib/aws-c-cal"]
path = contrib/aws-c-cal
url = https://github.com/ClickHouse/aws-c-cal.git
[submodule "contrib/aws-c-sdkutils"]
path = contrib/aws-c-sdkutils
url = https://github.com/awslabs/aws-c-sdkutils.git
[submodule "contrib/aws-c-http"]
path = contrib/aws-c-http
url = https://github.com/awslabs/aws-c-http.git
[submodule "contrib/aws-c-s3"]
path = contrib/aws-c-s3
url = https://github.com/awslabs/aws-c-s3.git
[submodule "contrib/aws-c-compression"]
path = contrib/aws-c-compression
url = https://github.com/awslabs/aws-c-compression.git
[submodule "contrib/aws-s2n-tls"]
path = contrib/aws-s2n-tls
url = https://github.com/aws/s2n-tls.git


@ -115,12 +115,25 @@ endif()
add_contrib (llvm-project-cmake llvm-project)
add_contrib (libfuzzer-cmake llvm-project)
add_contrib (libxml2-cmake libxml2)
-add_contrib (aws-s3-cmake
+add_contrib (aws-cmake
aws
aws-c-auth
aws-c-cal
aws-c-common
aws-c-compression
aws-c-event-stream
aws-c-http
aws-c-io
aws-c-mqtt
aws-c-s3
aws-c-sdkutils
aws-s2n-tls
aws-checksums
aws-crt-cpp
aws-cmake
)
add_contrib (base64-cmake base64)
add_contrib (simdjson-cmake simdjson)
add_contrib (rapidjson-cmake rapidjson)

contrib/aws vendored (2 changes)

@ -1 +1 @@
-Subproject commit 00b03604543367d7e310cb0993973fdcb723ea79
+Subproject commit 4a12641211d4dbc8e2fdb2dd0f1eea0927db9252

contrib/aws-c-auth vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 30df6c407e2df43bd244e2c34c9b4a4b87372bfb

contrib/aws-c-cal vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 85dd7664b786a389c6fb1a6f031ab4bb2282133d

@ -1 +1 @@
-Subproject commit 736a82d1697c108b04a277e66438a7f4e19b6857
+Subproject commit 324fd1d973ccb25c813aa747bf1759cfde5121c5

contrib/aws-c-compression vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit b517b7decd0dac30be2162f5186c250221c53aff

@ -1 +1 @@
-Subproject commit 3bc33662f9ccff4f4cbcf9509cc78c26e022fde0
+Subproject commit 39bfa94a14b7126bf0c1330286ef8db452d87e66

contrib/aws-c-http vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 2c5a2a7d5556600b9782ffa6c9d7e09964df1abc

contrib/aws-c-io vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 5d32c453560d0823df521a686bf7fbacde7f9be3

contrib/aws-c-mqtt vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 882c689561a3db1466330ccfe3b63637e0a575d3

contrib/aws-c-s3 vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit a41255ece72a7c887bba7f9d998ca3e14f4c8a1b

contrib/aws-c-sdkutils vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 25bf5cf225f977c3accc6a05a0a7a181ef2a4a30

@ -1 +1 @@
-Subproject commit 519d6d9093819b6cf89ffff589a27ef8f83d0f65
+Subproject commit 48e7c0e01479232f225c8044d76c84e74192889d


@ -0,0 +1,114 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCSourceRuns)
option(USE_CPU_EXTENSIONS "Whenever possible, use functions optimized for CPUs with specific extensions (ex: SSE, AVX)." ON)
# In the current (11/2/21) state of mingw64, the packaged gcc is not capable of emitting properly aligned avx2 instructions under certain circumstances.
# This leads to crashes for windows builds using mingw64 when invoking the avx2-enabled versions of certain functions. Until we can find a better
# work-around, disable avx2 (and all other extensions) in mingw builds.
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=54412
#
if (MINGW)
message(STATUS "MINGW detected! Disabling avx2 and other CPU extensions")
set(USE_CPU_EXTENSIONS OFF)
endif()
if(NOT CMAKE_CROSSCOMPILING)
check_c_source_runs("
#include <stdbool.h>
bool foo(int a, int b, int *c) {
return __builtin_mul_overflow(a, b, c);
}
int main() {
int out;
if (foo(1, 2, &out)) {
return 0;
}
return 0;
}" AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
if (USE_CPU_EXTENSIONS)
check_c_source_runs("
int main() {
int foo = 42;
_mulx_u32(1, 2, &foo);
return foo != 2;
}" AWS_HAVE_MSVC_MULX)
endif()
endif()
check_c_source_compiles("
#include <Windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
int main() {
return 0;
}
#else
it's not windows desktop
#endif
" AWS_HAVE_WINAPI_DESKTOP)
check_c_source_compiles("
int main() {
#if !(defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86))
# error \"not intel\"
#endif
return 0;
}
" AWS_ARCH_INTEL)
check_c_source_compiles("
int main() {
#if !(defined(__aarch64__) || defined(_M_ARM64))
# error \"not arm64\"
#endif
return 0;
}
" AWS_ARCH_ARM64)
check_c_source_compiles("
int main() {
#if !(defined(__arm__) || defined(_M_ARM))
# error \"not arm\"
#endif
return 0;
}
" AWS_ARCH_ARM32)
check_c_source_compiles("
int main() {
int foo = 42, bar = 24;
__asm__ __volatile__(\"\":\"=r\"(foo):\"r\"(bar):\"memory\");
}" AWS_HAVE_GCC_INLINE_ASM)
check_c_source_compiles("
#include <sys/auxv.h>
int main() {
#ifdef __linux__
getauxval(AT_HWCAP);
getauxval(AT_HWCAP2);
#endif
return 0;
}" AWS_HAVE_AUXV)
string(REGEX MATCH "^(aarch64|arm)" ARM_CPU "${CMAKE_SYSTEM_PROCESSOR}")
if(NOT LEGACY_COMPILER_SUPPORT OR ARM_CPU)
check_c_source_compiles("
#include <execinfo.h>
int main() {
backtrace(NULL, 0);
return 0;
}" AWS_HAVE_EXECINFO)
endif()
check_c_source_compiles("
#include <linux/if_link.h>
int main() {
return 1;
}" AWS_HAVE_LINUX_IF_LINK_H)


@ -0,0 +1,74 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckCCompilerFlag)
include(CheckIncludeFile)
if (USE_CPU_EXTENSIONS)
if (MSVC)
check_c_compiler_flag("/arch:AVX2" HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "/arch:AVX2")
endif()
else()
check_c_compiler_flag(-mavx2 HAVE_M_AVX2_FLAG)
if (HAVE_M_AVX2_FLAG)
set(AVX2_CFLAGS "-mavx -mavx2")
endif()
endif()
cmake_push_check_state()
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${AVX2_CFLAGS}")
check_c_source_compiles("
#include <immintrin.h>
#include <emmintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
_mm256_shuffle_epi8(vec, vec);
_mm256_set_epi32(1,2,3,4,5,6,7,8);
_mm256_permutevar8x32_epi32(vec, vec);
return 0;
}" HAVE_AVX2_INTRINSICS)
check_c_source_compiles("
#include <immintrin.h>
#include <string.h>
int main() {
__m256i vec;
memset(&vec, 0, sizeof(vec));
return (int)_mm256_extract_epi64(vec, 2);
}" HAVE_MM256_EXTRACT_EPI64)
cmake_pop_check_state()
endif() # USE_CPU_EXTENSIONS
macro(simd_add_definition_if target definition)
if(${definition})
target_compile_definitions(${target} PRIVATE -D${definition})
endif(${definition})
endmacro(simd_add_definition_if)
# Configure private preprocessor definitions for SIMD-related features
# Does not set any processor feature codegen flags
function(simd_add_definitions target)
simd_add_definition_if(${target} HAVE_AVX2_INTRINSICS)
simd_add_definition_if(${target} HAVE_MM256_EXTRACT_EPI64)
endfunction(simd_add_definitions)
# Adds source files only if AVX2 is supported. These files will be built with
# avx2 intrinsics enabled.
# Usage: simd_add_source_avx2(target file1.c file2.c ...)
function(simd_add_source_avx2 target)
foreach(file ${ARGN})
target_sources(${target} PRIVATE ${file})
set_source_files_properties(${file} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endforeach()
endfunction(simd_add_source_avx2)


@ -0,0 +1,50 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check if the platform supports setting thread affinity
# (important for hitting full NIC entitlement on NUMA architectures)
function(aws_set_thread_affinity_method target)
# Non-POSIX, Android, and Apple platforms do not support thread affinity.
if (NOT UNIX OR ANDROID OR APPLE)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
set(headers "pthread.h")
# BSDs put nonportable pthread declarations in a separate header.
if(CMAKE_SYSTEM_NAME MATCHES BSD)
set(headers "${headers};pthread_np.h")
endif()
# Using pthread attrs is the preferred method, but is glibc-specific.
check_symbol_exists(pthread_attr_setaffinity_np "${headers}" USE_PTHREAD_ATTR_SETAFFINITY)
if (USE_PTHREAD_ATTR_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD_ATTR)
return()
endif()
# This method is still nonportable, but is supported by musl and BSDs.
check_symbol_exists(pthread_setaffinity_np "${headers}" USE_PTHREAD_SETAFFINITY)
if (USE_PTHREAD_SETAFFINITY)
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_PTHREAD)
return()
endif()
# If we got here, we expected thread affinity support but didn't find it.
# We still build with degraded NUMA performance, but show a warning.
message(WARNING "No supported method for setting thread affinity")
target_compile_definitions(${target} PRIVATE
-DAWS_AFFINITY_METHOD=AWS_AFFINITY_METHOD_NONE)
cmake_pop_check_state()
endfunction()


@ -0,0 +1,61 @@
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
include(CheckSymbolExists)
# Check how the platform supports setting thread name
function(aws_set_thread_name_method target)
if (WINDOWS)
# On Windows we do a runtime check, instead of compile-time check
return()
elseif (APPLE)
# All Apple platforms we support have the same function, so no need for compile-time check.
return()
endif()
cmake_push_check_state()
list(APPEND CMAKE_REQUIRED_DEFINITIONS -D_GNU_SOURCE)
list(APPEND CMAKE_REQUIRED_LIBRARIES pthread)
# The start of the test program
set(c_source_start "
#define _GNU_SOURCE
#include <pthread.h>
#if defined(__FreeBSD__) || defined(__NETBSD__)
#include <pthread_np.h>
#endif
int main() {
pthread_t thread_id;
")
# The end of the test program
set(c_source_end "}")
# pthread_setname_np() usually takes 2 args
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\");
${c_source_end}"
PTHREAD_SETNAME_TAKES_2ARGS)
if (PTHREAD_SETNAME_TAKES_2ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_2ARGS)
return()
endif()
# But on NetBSD it takes 3!
check_c_source_compiles("
${c_source_start}
pthread_setname_np(thread_id, \"asdf\", NULL);
${c_source_end}
" PTHREAD_SETNAME_TAKES_3ARGS)
if (PTHREAD_SETNAME_TAKES_3ARGS)
target_compile_definitions(${target} PRIVATE -DAWS_PTHREAD_SETNAME_TAKES_3ARGS)
return()
endif()
# And on many older/weirder platforms it's just not supported
cmake_pop_check_state()
endfunction()


@ -0,0 +1,376 @@
set(ENABLE_AWS_S3_DEFAULT OFF)
if(ENABLE_LIBRARIES AND (OS_LINUX OR OS_DARWIN) AND TARGET OpenSSL::Crypto)
set(ENABLE_AWS_S3_DEFAULT ON)
endif()
option(ENABLE_AWS_S3 "Enable AWS S3" ${ENABLE_AWS_S3_DEFAULT})
if(ENABLE_AWS_S3)
if(NOT TARGET OpenSSL::Crypto)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK without OpenSSL")
elseif(NOT (OS_LINUX OR OS_DARWIN))
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use AWS SDK with platform ${CMAKE_SYSTEM_NAME}")
endif()
endif()
if(NOT ENABLE_AWS_S3)
message(STATUS "Not using AWS S3")
return()
endif()
# Utilities.
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsFeatureTests.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadAffinity.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsThreadName.cmake")
include("${ClickHouse_SOURCE_DIR}/contrib/aws-cmake/AwsSIMD.cmake")
# Gather sources and options.
set(AWS_SOURCES)
set(AWS_PUBLIC_INCLUDES)
set(AWS_PRIVATE_INCLUDES)
set(AWS_PUBLIC_COMPILE_DEFS)
set(AWS_PRIVATE_COMPILE_DEFS)
set(AWS_PRIVATE_LIBS)
if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DDEBUG_BUILD")
endif()
set(ENABLE_OPENSSL_ENCRYPTION ON)
if (ENABLE_OPENSSL_ENCRYPTION)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DENABLE_OPENSSL_ENCRYPTION")
endif()
set(USE_S2N ON)
if (USE_S2N)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_S2N")
endif()
# Directories.
SET(AWS_SDK_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws")
SET(AWS_SDK_CORE_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-core")
SET(AWS_SDK_S3_DIR "${AWS_SDK_DIR}/aws-cpp-sdk-s3")
SET(AWS_AUTH_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-auth")
SET(AWS_CAL_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-cal")
SET(AWS_CHECKSUMS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_COMPRESSION_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-compression")
SET(AWS_CRT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-crt-cpp")
SET(AWS_EVENT_STREAM_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
SET(AWS_HTTP_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-http")
SET(AWS_IO_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-io")
SET(AWS_MQTT_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-mqtt")
SET(AWS_S2N_TLS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-s2n-tls")
SET(AWS_S3_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-s3")
SET(AWS_SDKUTILS_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-sdkutils")
# aws-cpp-sdk-core
file(GLOB AWS_SDK_CORE_SRC
"${AWS_SDK_CORE_DIR}/source/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/bearer-token-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer/*.cpp"
"${AWS_SDK_CORE_DIR}/source/auth/signer-provider/*.cpp"
"${AWS_SDK_CORE_DIR}/source/client/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/*.cpp"
"${AWS_SDK_CORE_DIR}/source/config/defaults/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/*.cpp"
"${AWS_SDK_CORE_DIR}/source/endpoint/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/cjson/*.cpp"
"${AWS_SDK_CORE_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/*.cpp"
"${AWS_SDK_CORE_DIR}/source/http/standard/*.cpp"
"${AWS_SDK_CORE_DIR}/source/internal/*.cpp"
"${AWS_SDK_CORE_DIR}/source/monitoring/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/base64/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/event/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/json/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/logging/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/stream/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/threading/*.cpp"
"${AWS_SDK_CORE_DIR}/source/utils/xml/*.cpp"
)
if(OS_LINUX OR OS_DARWIN)
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/linux-shared/*.cpp")
file(GLOB AWS_SDK_CORE_PLATFORM_SRC "${AWS_SDK_CORE_DIR}/source/platform/linux-shared/*.cpp")
else()
file(GLOB AWS_SDK_CORE_NET_SRC "${AWS_SDK_CORE_DIR}/source/net/*.cpp")
set(AWS_SDK_CORE_PLATFORM_SRC)
endif()
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MAJOR=1")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_MINOR=10")
list(APPEND AWS_PUBLIC_COMPILE_DEFS "-DAWS_SDK_VERSION_PATCH=36")
list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})
list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_SDK_CORE_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
# aws-cpp-sdk-s3
file(GLOB AWS_SDK_S3_SRC
"${AWS_SDK_S3_DIR}/source/*.cpp"
"${AWS_SDK_S3_DIR}/source/model/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_SDK_S3_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDK_S3_DIR}/include/")
# aws-c-auth
file(GLOB AWS_AUTH_SRC
"${AWS_AUTH_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_AUTH_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_AUTH_DIR}/include/")
# aws-c-cal
file(GLOB AWS_CAL_SRC
"${AWS_CAL_DIR}/source/*.c"
)
if (ENABLE_OPENSSL_ENCRYPTION)
file(GLOB AWS_CAL_OS_SRC
"${AWS_CAL_DIR}/source/unix/*.c"
)
list(APPEND AWS_PRIVATE_LIBS OpenSSL::Crypto)
endif()
list(APPEND AWS_SOURCES ${AWS_CAL_SRC} ${AWS_CAL_OS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CAL_DIR}/include/")
# aws-c-event-stream
file(GLOB AWS_EVENT_STREAM_SRC
"${AWS_EVENT_STREAM_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_EVENT_STREAM_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_EVENT_STREAM_DIR}/include/")
# aws-c-common
file(GLOB AWS_COMMON_SRC
"${AWS_COMMON_DIR}/source/*.c"
"${AWS_COMMON_DIR}/source/external/*.c"
"${AWS_COMMON_DIR}/source/posix/*.c"
)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/generic/*.c"
)
if (AWS_ARCH_INTEL)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/intel/cpuid.c"
"${AWS_COMMON_DIR}/source/arch/intel/asm/*.c"
)
elseif (AWS_ARCH_ARM64 OR AWS_ARCH_ARM32)
if (AWS_HAVE_AUXV)
file(GLOB AWS_COMMON_ARCH_SRC
"${AWS_COMMON_DIR}/source/arch/arm/asm/*.c"
)
endif()
endif()
set(AWS_COMMON_AVX2_SRC)
if (HAVE_AVX2_INTRINSICS)
list(APPEND AWS_PRIVATE_COMPILE_DEFS "-DUSE_SIMD_ENCODING")
set(AWS_COMMON_AVX2_SRC "${AWS_COMMON_DIR}/source/arch/intel/encoding_avx2.c")
set_source_files_properties(${AWS_COMMON_AVX2_SRC} PROPERTIES COMPILE_FLAGS "${AVX2_CFLAGS}")
endif()
configure_file("${AWS_COMMON_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
list(APPEND AWS_SOURCES ${AWS_COMMON_SRC} ${AWS_COMMON_ARCH_SRC} ${AWS_COMMON_AVX2_SRC})
list(APPEND AWS_PUBLIC_INCLUDES
"${AWS_COMMON_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include"
)
# aws-checksums
file(GLOB AWS_CHECKSUMS_SRC
"${AWS_CHECKSUMS_DIR}/source/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
if(AWS_ARCH_INTEL AND AWS_HAVE_GCC_INLINE_ASM)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/intel/asm/*.c"
)
endif()
if (AWS_ARCH_ARM64)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
)
set_source_files_properties("${AWS_CHECKSUMS_DIR}/source/arm/crc32c_arm.c" PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
elseif (AWS_ARCH_ARM32)
if (AWS_ARM32_CRC)
file(GLOB AWS_CHECKSUMS_ARCH_SRC
"${AWS_CHECKSUMS_DIR}/source/arm/*.c"
"${AWS_CHECKSUMS_DIR}/source/arm/asm/*.c"
)
set_source_files_properties(source/arm/crc32c_arm.c PROPERTIES COMPILE_FLAGS -march=armv8-a+crc)
endif()
endif()
list(APPEND AWS_SOURCES ${AWS_CHECKSUMS_SRC} ${AWS_CHECKSUMS_ARCH_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_CHECKSUMS_DIR}/include/")
# aws-c-io
file(GLOB AWS_IO_SRC
"${AWS_IO_DIR}/source/*.c"
)
if (OS_LINUX)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/linux/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
elseif (OS_DARWIN)
file(GLOB AWS_IO_OS_SRC
"${AWS_IO_DIR}/source/bsd/*.c"
"${AWS_IO_DIR}/source/posix/*.c"
)
endif()
set(AWS_IO_TLS_SRC)
if (USE_S2N)
file(GLOB AWS_IO_TLS_SRC
"${AWS_IO_DIR}/source/s2n/*.c"
)
endif()
list(APPEND AWS_SOURCES ${AWS_IO_SRC} ${AWS_IO_OS_SRC} ${AWS_IO_TLS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_IO_DIR}/include/")
# aws-s2n-tls
if (USE_S2N)
file(GLOB AWS_S2N_TLS_SRC
"${AWS_S2N_TLS_DIR}/crypto/*.c"
"${AWS_S2N_TLS_DIR}/error/*.c"
"${AWS_S2N_TLS_DIR}/stuffer/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/*.c"
"${AWS_S2N_TLS_DIR}/pq-crypto/kyber_r3/*.c"
"${AWS_S2N_TLS_DIR}/tls/*.c"
"${AWS_S2N_TLS_DIR}/tls/extensions/*.c"
"${AWS_S2N_TLS_DIR}/utils/*.c"
)
list(APPEND AWS_SOURCES ${AWS_S2N_TLS_SRC})
list(APPEND AWS_PRIVATE_INCLUDES
"${AWS_S2N_TLS_DIR}/"
"${AWS_S2N_TLS_DIR}/api/"
)
endif()
# aws-crt-cpp
file(GLOB AWS_CRT_SRC
"${AWS_CRT_DIR}/source/*.cpp"
"${AWS_CRT_DIR}/source/auth/*.cpp"
"${AWS_CRT_DIR}/source/crypto/*.cpp"
"${AWS_CRT_DIR}/source/endpoints/*.cpp"
"${AWS_CRT_DIR}/source/external/*.cpp"
"${AWS_CRT_DIR}/source/http/*.cpp"
"${AWS_CRT_DIR}/source/io/*.cpp"
)
list(APPEND AWS_SOURCES ${AWS_CRT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_CRT_DIR}/include/")
# aws-c-mqtt
file(GLOB AWS_MQTT_SRC
"${AWS_MQTT_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_MQTT_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_MQTT_DIR}/include/")
# aws-c-http
file(GLOB AWS_HTTP_SRC
"${AWS_HTTP_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_HTTP_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_HTTP_DIR}/include/")
# aws-c-compression
file(GLOB AWS_COMPRESSION_SRC
"${AWS_COMPRESSION_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_COMPRESSION_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_COMPRESSION_DIR}/include/")
# aws-c-s3
file(GLOB AWS_S3_SRC
"${AWS_S3_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_S3_SRC})
list(APPEND AWS_PRIVATE_INCLUDES "${AWS_S3_DIR}/include/")
# aws-c-sdkutils
file(GLOB AWS_SDKUTILS_SRC
"${AWS_SDKUTILS_DIR}/source/*.c"
)
list(APPEND AWS_SOURCES ${AWS_SDKUTILS_SRC})
list(APPEND AWS_PUBLIC_INCLUDES "${AWS_SDKUTILS_DIR}/include/")
# Add library.
add_library(_aws ${AWS_SOURCES})
target_include_directories(_aws SYSTEM BEFORE PUBLIC ${AWS_PUBLIC_INCLUDES})
target_include_directories(_aws SYSTEM BEFORE PRIVATE ${AWS_PRIVATE_INCLUDES})
target_compile_definitions(_aws PUBLIC ${AWS_PUBLIC_COMPILE_DEFS})
target_compile_definitions(_aws PRIVATE ${AWS_PRIVATE_COMPILE_DEFS})
target_link_libraries(_aws PRIVATE ${AWS_PRIVATE_LIBS})
aws_set_thread_affinity_method(_aws)
aws_set_thread_name_method(_aws)
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws PRIVATE -g0)
endif()
add_library(ch_contrib::aws_s3 ALIAS _aws)

contrib/aws-crt-cpp vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit ec0bea288f451d884c0d80d534bc5c66241c39a4

contrib/aws-s2n-tls vendored Submodule (1 change)

@ -0,0 +1 @@
Subproject commit 15d534e8a9ca1eda6bacee514e37d08b4f38a526


@ -1,122 +0,0 @@
if(NOT OS_FREEBSD)
option(ENABLE_S3 "Enable S3" ${ENABLE_LIBRARIES})
elseif(ENABLE_S3)
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use S3 on FreeBSD")
endif()
if(NOT ENABLE_S3)
message(STATUS "Not using S3")
return()
endif()
SET(AWS_S3_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-s3")
SET(AWS_CORE_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws/aws-cpp-sdk-core")
SET(AWS_CHECKSUMS_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-checksums")
SET(AWS_COMMON_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-common")
SET(AWS_EVENT_STREAM_LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/aws-c-event-stream")
OPTION(USE_AWS_MEMORY_MANAGEMENT "Aws memory management" OFF)
configure_file("${AWS_CORE_LIBRARY_DIR}/include/aws/core/SDKConfig.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)
configure_file("${AWS_COMMON_LIBRARY_DIR}/include/aws/common/config.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/include/aws/common/config.h" @ONLY)
file(GLOB AWS_CORE_SOURCES
"${AWS_CORE_LIBRARY_DIR}/source/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/auth/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/client/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/http/standard/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/config/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/cjson/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/external/tinyxml2/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/internal/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/monitoring/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/net/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/platform/linux-shared/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/base64/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/event/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/openssl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/crypto/factory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/json/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/logging/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/memory/stl/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/stream/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/threading/*.cpp"
"${AWS_CORE_LIBRARY_DIR}/source/utils/xml/*.cpp"
)
file(GLOB AWS_S3_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/*.cpp"
)
file(GLOB AWS_S3_MODEL_SOURCES
"${AWS_S3_LIBRARY_DIR}/source/model/*.cpp"
)
file(GLOB AWS_EVENT_STREAM_SOURCES
"${AWS_EVENT_STREAM_LIBRARY_DIR}/source/*.c"
)
file(GLOB AWS_COMMON_SOURCES
"${AWS_COMMON_LIBRARY_DIR}/source/*.c"
"${AWS_COMMON_LIBRARY_DIR}/source/posix/*.c"
)
file(GLOB AWS_CHECKSUMS_SOURCES
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/intel/*.c"
"${AWS_CHECKSUMS_LIBRARY_DIR}/source/arm/*.c"
)
file(GLOB S3_UNIFIED_SRC
${AWS_EVENT_STREAM_SOURCES}
${AWS_COMMON_SOURCES}
${AWS_S3_SOURCES}
${AWS_S3_MODEL_SOURCES}
${AWS_CORE_SOURCES}
)
set(S3_INCLUDES
"${AWS_COMMON_LIBRARY_DIR}/include/"
"${AWS_EVENT_STREAM_LIBRARY_DIR}/include/"
"${AWS_S3_LIBRARY_DIR}/include/"
"${AWS_CORE_LIBRARY_DIR}/include/"
"${CMAKE_CURRENT_BINARY_DIR}/include/"
)
add_library(_aws_s3_checksums ${AWS_CHECKSUMS_SOURCES})
target_include_directories(_aws_s3_checksums SYSTEM PUBLIC "${AWS_CHECKSUMS_LIBRARY_DIR}/include/")
if(CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
target_compile_definitions(_aws_s3_checksums PRIVATE "-DDEBUG_BUILD")
endif()
set_target_properties(_aws_s3_checksums PROPERTIES LINKER_LANGUAGE C)
set_property(TARGET _aws_s3_checksums PROPERTY C_STANDARD 99)
add_library(_aws_s3 ${S3_UNIFIED_SRC})
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MAJOR=1")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_MINOR=7")
target_compile_definitions(_aws_s3 PUBLIC "AWS_SDK_VERSION_PATCH=231")
target_include_directories(_aws_s3 SYSTEM BEFORE PUBLIC ${S3_INCLUDES})
if (TARGET OpenSSL::SSL)
target_compile_definitions(_aws_s3 PUBLIC -DENABLE_OPENSSL_ENCRYPTION)
target_link_libraries(_aws_s3 PRIVATE OpenSSL::Crypto OpenSSL::SSL)
endif()
target_link_libraries(_aws_s3 PRIVATE _aws_s3_checksums)
# The library is large - avoid bloat.
if (OMIT_HEAVY_DEBUG_SYMBOLS)
target_compile_options (_aws_s3 PRIVATE -g0)
target_compile_options (_aws_s3_checksums PRIVATE -g0)
endif()
add_library(ch_contrib::aws_s3 ALIAS _aws_s3)

contrib/googletest vendored (2 changes)

@ -1 +1 @@
-Subproject commit e7e591764baba0a0c3c9ad0014430e7a27331d16
+Subproject commit 71140c3ca7a87bb1b5b9c9f1500fea8858cce344

contrib/sysroot vendored (2 changes)

@ -1 +1 @@
-Subproject commit 0f41651860fa4a530ecd68b93a15b8fd77397adf
+Subproject commit f0081b2649b94837855f3bc7d05ef326b100bad8


@ -621,7 +621,7 @@ CREATE TABLE example_table
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d
-TTL d + INTERVAL 1 MONTH [DELETE],
+TTL d + INTERVAL 1 MONTH DELETE,
d + INTERVAL 1 WEEK TO VOLUME 'aaa',
d + INTERVAL 2 WEEK TO DISK 'bbb';
```


@ -6,6 +6,17 @@ sidebar_label: Data Replication
# Data Replication
:::note
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:
```
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
```
with:
```
ENGINE = ReplicatedReplacingMergeTree
```
:::
Replication is only supported for tables in the MergeTree family:
- ReplicatedMergeTree
@ -85,15 +96,15 @@ Example of setting the addresses of the auxiliary ZooKeeper cluster:
</auxiliary_zookeepers>
```
-To store table metadata in an auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
-ReplicatedMergeTree engine as follow:
+To store table metadata in an auxiliary ZooKeeper cluster instead of the default ZooKeeper cluster, we can use SQL to create the table with
+ReplicatedMergeTree engine as follows:
```
CREATE TABLE table_name ( ... ) ENGINE = ReplicatedMergeTree('zookeeper_name_configured_in_auxiliary_zookeepers:path', 'replica_name') ...
```
You can specify any existing ZooKeeper cluster and the system will use a directory on it for its own data (the directory is specified when creating a replicatable table).
-If ZooKeeper isn't set in the config file, you can't create replicated tables, and any existing replicated tables will be read-only.
+If ZooKeeper is not set in the config file, you can't create replicated tables, and any existing replicated tables will be read-only.
ZooKeeper is not used in `SELECT` queries because replication does not affect the performance of `SELECT` and queries run just as fast as they do for non-replicated tables. When querying distributed replicated tables, ClickHouse behavior is controlled by the settings [max_replica_delay_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-max_replica_delay_for_distributed_queries) and [fallback_to_stale_replicas_for_distributed_queries](/docs/en/operations/settings/settings.md/#settings-fallback_to_stale_replicas_for_distributed_queries).
@ -119,8 +130,23 @@ The system monitors data synchronicity on replicas and is able to recover after
## Creating Replicated Tables {#creating-replicated-tables}
:::note
In ClickHouse Cloud replication is managed for you. Please create your tables without adding arguments. For example, in the text below you would replace:
```
ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
```
with:
```
ENGINE = ReplicatedReplacingMergeTree
```
:::
The `Replicated` prefix is added to the table engine name. For example: `ReplicatedMergeTree`.
:::tip
Adding `Replicated` is optional in ClickHouse Cloud, as all of the tables are replicated.
:::
### Replicated\*MergeTree parameters
#### zoo_path
@ -144,7 +170,7 @@ CREATE TABLE table_name
CounterID UInt32,
UserID UInt32,
ver UInt16
-) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', ver)
+) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', ver)
PARTITION BY toYYYYMM(EventDate)
ORDER BY (CounterID, EventDate, intHash32(UserID))
SAMPLE BY intHash32(UserID);
@ -160,7 +186,7 @@ CREATE TABLE table_name
EventDate DateTime,
CounterID UInt32,
UserID UInt32
-) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192);
+) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/table_name', '{replica}', EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192);
```
</details>
@ -171,7 +197,6 @@ Example:
``` xml
<macros>
-<layer>05</layer>
<shard>02</shard>
<replica>example05-02-1</replica>
</macros>
@ -182,12 +207,12 @@ In this case, the path consists of the following parts:
`/clickhouse/tables/` is the common prefix. We recommend using exactly this one.
-`{layer}-{shard}` is the shard identifier. In this example it consists of two parts, since the example cluster uses bi-level sharding. For most tasks, you can leave just the {shard} substitution, which will be expanded to the shard identifier.
+`{shard}` will be expanded to the shard identifier.
`table_name` is the name of the node for the table in ClickHouse Keeper. It is a good idea to make it the same as the table name. It is defined explicitly, because in contrast to the table name, it does not change after a RENAME query.
*HINT*: you could add a database name in front of `table_name` as well. E.g. `db_name.table_name`
-The two built-in substitutions `{database}` and `{table}` can be used, they expand into the table name and the database name respectively (unless these macros are defined in the `macros` section). So the zookeeper path can be specified as `'/clickhouse/tables/{layer}-{shard}/{database}/{table}'`.
+The two built-in substitutions `{database}` and `{table}` can be used, they expand into the table name and the database name respectively (unless these macros are defined in the `macros` section). So the zookeeper path can be specified as `'/clickhouse/tables/{shard}/{database}/{table}'`.
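For instance (an illustrative sketch, not from the original page; `db_name.table_name` is a hypothetical table), the path can then be written once and reused across tables:

``` sql
CREATE TABLE db_name.table_name ( ... )
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/{table}', '{replica}')
...
```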
Be careful with table renames when using these built-in substitutions. The path in ClickHouse Keeper cannot be changed, and when the table is renamed, the macros will expand into a different path, the table will refer to a path that does not exist in ClickHouse Keeper, and will go into read-only mode.
The replica name identifies different replicas of the same table. You can use the server name for this, as in the example. The name only needs to be unique within each shard.


@ -9,6 +9,29 @@ slug: /en/operations/backup
- [Backup/restore using an S3 disk](#backuprestore-using-an-s3-disk)
- [Alternatives](#alternatives)
## Command summary
```bash
BACKUP|RESTORE
TABLE [db.]table_name [AS [db.]table_name_in_backup]
[PARTITION[S] partition_expr [,...]] |
DICTIONARY [db.]dictionary_name [AS [db.]name_in_backup] |
DATABASE database_name [AS database_name_in_backup]
[EXCEPT TABLES ...] |
TEMPORARY TABLE table_name [AS table_name_in_backup] |
VIEW view_name [AS view_name_in_backup]
ALL TEMPORARY TABLES [EXCEPT ...] |
ALL DATABASES [EXCEPT ...] } [,...]
[ON CLUSTER 'cluster_name']
TO|FROM File('<path>/<filename>') | Disk('<disk_name>', '<path>/') | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')
[SETTINGS base_backup = File('<path>/<filename>') | Disk(...) | S3('<S3 endpoint>/<path>', '<Access key ID>', '<Secret access key>')]
```
:::note ALL
`ALL` is only applicable to the `RESTORE` command.
:::
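As a concrete illustration (a minimal sketch; it assumes a backup destination disk named `backups` has been configured in the server configuration, which this page does not show):

``` sql
-- Assumes a disk named 'backups' is configured on the server.
BACKUP TABLE test.table TO Disk('backups', '1.zip');
-- Restore under a new name to avoid clobbering the original:
RESTORE TABLE test.table AS test.table_restored FROM Disk('backups', '1.zip');
```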
## Background
While [replication](../engines/table-engines/mergetree-family/replication.md) provides protection from hardware failures, it does not protect against human errors: accidental deletion of data, deletion of the wrong table or a table on the wrong cluster, and software bugs that result in incorrect data processing or data corruption. In many cases mistakes like these will affect all replicas. ClickHouse has built-in safeguards to prevent some types of mistakes — for example, by default [you can't just drop tables with a MergeTree-like engine containing more than 50 Gb of data](server-configuration-parameters/settings.md#max-table-size-to-drop). However, these safeguards do not cover all possible cases and can be circumvented.


@ -1027,6 +1027,186 @@ Result:
└─────────────┘
```
## h3PointDistM
Returns the "great circle" or "haversine" distance between pairs of GeoCoord points (latitude/longitude) pairs in meters.
**Syntax**
``` sql
h3PointDistM(lat1, lon1, lat2, lon2)
```
**Arguments**
- `lat1`, `lon1` — Latitude and Longitude of point1 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
- `lat2`, `lon2` — Latitude and Longitude of point2 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
**Returned values**
- Haversine or great circle distance in meters.
Type: [Float64](../../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
select h3PointDistM(-10.0, 0.0, 10.0, 0.0) as h3PointDistM;
```
Result:
``` text
┌──────h3PointDistM─┐
│ 2223901.039504589 │
└───────────────────┘
```
## h3PointDistKm
Returns the "great circle" or "haversine" distance between pairs of GeoCoord points (latitude/longitude) pairs in kilometers.
**Syntax**
``` sql
h3PointDistKm(lat1, lon1, lat2, lon2)
```
**Arguments**
- `lat1`, `lon1` — Latitude and Longitude of point1 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
- `lat2`, `lon2` — Latitude and Longitude of point2 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
**Returned values**
- Haversine or great circle distance in kilometers.
Type: [Float64](../../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
select h3PointDistKm(-10.0, 0.0, 10.0, 0.0) as h3PointDistKm;
```
Result:
``` text
┌─────h3PointDistKm─┐
│ 2223.901039504589 │
└───────────────────┘
```
## h3PointDistRads
Returns the "great circle" or "haversine" distance between pairs of GeoCoord points (latitude/longitude) pairs in radians.
**Syntax**
``` sql
h3PointDistRads(lat1, lon1, lat2, lon2)
```
**Arguments**
- `lat1`, `lon1` — Latitude and Longitude of point1 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
- `lat2`, `lon2` — Latitude and Longitude of point2 in degrees. Type: [Float64](../../../sql-reference/data-types/float.md).
**Returned values**
- Haversine or great circle distance in radians.
Type: [Float64](../../../sql-reference/data-types/float.md).
**Example**
Query:
``` sql
select h3PointDistRads(-10.0, 0.0, 10.0, 0.0) as h3PointDistRads;
```
Result:
``` text
┌────h3PointDistRads─┐
│ 0.3490658503988659 │
└────────────────────┘
```
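As a sanity check (an addition to this page, not from it), the three examples above are mutually consistent: the sample points are exactly 20 degrees of arc apart, and multiplying the radians result by a mean Earth radius of about 6371.007 km (an assumed constant, inferred from the sample outputs rather than stated here) reproduces the kilometers result:

``` sql
-- 0.3490658503988659 rad * 6371.007180918475 km ≈ 2223.901 km
SELECT h3PointDistRads(-10.0, 0.0, 10.0, 0.0) * 6371.007180918475 AS km;
```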
## h3GetRes0Indexes
Returns an array of all the resolution 0 H3 indexes.
**Syntax**
``` sql
h3GetRes0Indexes()
```
**Returned values**
- Array of all the resolution 0 H3 indexes.
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
select h3GetRes0Indexes() as indexes;
```
Result:
``` text
┌─indexes─────────────────────────────────────┐
│ [576495936675512319,576531121047601151,....]│
└─────────────────────────────────────────────┘
```
## h3GetPentagonIndexes
Returns all the pentagon H3 indexes at the specified resolution.
**Syntax**
``` sql
h3GetPentagonIndexes(resolution)
```
**Parameter**
- `resolution` — Index resolution. Range: `[0, 15]`. Type: [UInt8](../../../sql-reference/data-types/int-uint.md).
**Returned value**
- Array of all pentagon H3 indexes.
Type: [Array](../../../sql-reference/data-types/array.md)([UInt64](../../../sql-reference/data-types/int-uint.md)).
**Example**
Query:
``` sql
SELECT h3GetPentagonIndexes(3) AS indexes;
```
Result:
``` text
┌─indexes────────────────────────────────────────────────────────┐
│ [590112357393367039,590464201114255359,590816044835143679,...] │
└────────────────────────────────────────────────────────────────┘
```
## h3Line
Returns the line of indices between the two indices that are provided.


@ -8,7 +8,7 @@ sidebar_label: Type Conversion
## Common Issues of Numeric Conversions
-When you convert a value from one to another data type, you should remember that in common case, it is an unsafe operation that can lead to a data loss. A data loss can occur if you try to fit value from a larger data type to a smaller data type, or if you convert values between different data types.
+When you convert a value from one to another data type, you should remember that if you try to fit a value from a larger data type to a smaller one (for example Int64 to Int32), or convert from one data type to another (for example `String` to `Int`), you could have data loss. Test beforehand.
ClickHouse has the [same behavior as C++ programs](https://en.cppreference.com/w/cpp/language/implicit_conversion).
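For example (a quick sketch, not part of the original page), narrowing conversions wrap around just as a C++ `static_cast` between integer types would:

``` sql
SELECT toInt8(128);  -- 128 does not fit in Int8; expect -128
SELECT toUInt8(-1);  -- -1 reinterpreted as unsigned; expect 255 (compare the cast(-1, 'UInt8') example later on this page)
```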
@ -45,7 +45,7 @@ SELECT toInt64(nan), toInt32(32), toInt16('16'), toInt8(8.8);
Result:
-``` text
+```response
┌─────────toInt64(nan)─┬─toInt32(32)─┬─toInt16('16')─┬─toInt8(8.8)─┐
│ -9223372036854775808 │ 32 │ 16 │ 8 │
└──────────────────────┴─────────────┴───────────────┴─────────────┘
@ -65,7 +65,7 @@ SELECT toInt64OrZero('123123'), toInt8OrZero('123qwe123');
Result:
-``` text
+```response
┌─toInt64OrZero('123123')─┬─toInt8OrZero('123qwe123')─┐
│ 123123 │ 0 │
└─────────────────────────┴───────────────────────────┘
@ -85,7 +85,7 @@ SELECT toInt64OrNull('123123'), toInt8OrNull('123qwe123');
Result:
-``` text
+```response
┌─toInt64OrNull('123123')─┬─toInt8OrNull('123qwe123')─┐
│ 123123 │ ᴺᵁᴸᴸ │
└─────────────────────────┴───────────────────────────┘
@ -105,7 +105,7 @@ SELECT toInt64OrDefault('123123', cast('-1' as Int64)), toInt8OrDefault('123qwe1
Result:
-``` text
+```response
┌─toInt64OrDefault('123123', CAST('-1', 'Int64'))─┬─toInt8OrDefault('123qwe123', CAST('-1', 'Int8'))─┐
│ 123123 │ -1 │
└─────────────────────────────────────────────────┴──────────────────────────────────────────────────┘
@ -144,7 +144,7 @@ SELECT toUInt64(nan), toUInt32(-32), toUInt16('16'), toUInt8(8.8);
Result:
-``` text
+```response
┌───────toUInt64(nan)─┬─toUInt32(-32)─┬─toUInt16('16')─┬─toUInt8(8.8)─┐
│ 9223372036854775808 │ 4294967264 │ 16 │ 8 │
└─────────────────────┴───────────────┴────────────────┴──────────────┘
@ -314,7 +314,7 @@ Type: [Date32](/docs/en/sql-reference/data-types/date32.md).
SELECT toDate32('1955-01-01') AS value, toTypeName(value);
```
-``` text
+```response
┌──────value─┬─toTypeName(toDate32('1955-01-01'))─┐
│ 1955-01-01 │ Date32 │
└────────────┴────────────────────────────────────┘
@ -326,7 +326,7 @@ SELECT toDate32('1955-01-01') AS value, toTypeName(value);
SELECT toDate32('1899-01-01') AS value, toTypeName(value);
```
-``` text
+```response
┌──────value─┬─toTypeName(toDate32('1899-01-01'))─┐
│ 1900-01-01 │ Date32 │
└────────────┴────────────────────────────────────┘
@ -338,7 +338,7 @@ SELECT toDate32('1899-01-01') AS value, toTypeName(value);
SELECT toDate32(toDate('1899-01-01')) AS value, toTypeName(value);
```
-``` text
+```response
┌──────value─┬─toTypeName(toDate32(toDate('1899-01-01')))─┐
│ 1970-01-01 │ Date32 │
└────────────┴────────────────────────────────────────────┘
@ -358,7 +358,7 @@ SELECT toDate32OrZero('1899-01-01'), toDate32OrZero('');
Result:
-``` text
+```response
┌─toDate32OrZero('1899-01-01')─┬─toDate32OrZero('')─┐
│ 1900-01-01 │ 1900-01-01 │
└──────────────────────────────┴────────────────────┘
@ -378,7 +378,7 @@ SELECT toDate32OrNull('1955-01-01'), toDate32OrNull('');
Result:
-``` text
+```response
┌─toDate32OrNull('1955-01-01')─┬─toDate32OrNull('')─┐
│ 1955-01-01 │ ᴺᵁᴸᴸ │
└──────────────────────────────┴────────────────────┘
@ -400,7 +400,7 @@ SELECT
Result:
-``` text
+```response
┌─toDate32OrDefault('1930-01-01', toDate32('2020-01-01'))─┬─toDate32OrDefault('xx1930-01-01', toDate32('2020-01-01'))─┐
│ 1930-01-01 │ 2020-01-01 │
└─────────────────────────────────────────────────────────┴───────────────────────────────────────────────────────────┘
@ -436,7 +436,7 @@ Type: [DateTime64](/docs/en/sql-reference/data-types/datetime64.md).
SELECT toDateTime64('1955-01-01 00:00:00.000', 3) AS value, toTypeName(value);
```
-``` text
+```response
┌───────────────────value─┬─toTypeName(toDateTime64('1955-01-01 00:00:00.000', 3))─┐
│ 1955-01-01 00:00:00.000 │ DateTime64(3) │
└─────────────────────────┴────────────────────────────────────────────────────────┘
@ -448,7 +448,7 @@ SELECT toDateTime64('1955-01-01 00:00:00.000', 3) AS value, toTypeName(value);
SELECT toDateTime64(1546300800.000, 3) AS value, toTypeName(value);
```
-``` text
+```response
┌───────────────────value─┬─toTypeName(toDateTime64(1546300800., 3))─┐
│ 2019-01-01 00:00:00.000 │ DateTime64(3) │
└─────────────────────────┴──────────────────────────────────────────┘
@ -460,7 +460,7 @@ Without the decimal point the value is still treated as Unix Timestamp in second
SELECT toDateTime64(1546300800000, 3) AS value, toTypeName(value);
```
-``` text
+```response
┌───────────────────value─┬─toTypeName(toDateTime64(1546300800000, 3))─┐
│ 2282-12-31 00:00:00.000 │ DateTime64(3) │
└─────────────────────────┴────────────────────────────────────────────┘
@ -473,7 +473,7 @@ SELECT toDateTime64(1546300800000, 3) AS value, toTypeName(value);
SELECT toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul') AS value, toTypeName(value);
```
-``` text
+```response
┌───────────────────value─┬─toTypeName(toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul'))─┐
│ 2019-01-01 00:00:00.000 │ DateTime64(3, 'Asia/Istanbul') │
└─────────────────────────┴─────────────────────────────────────────────────────────────────────┘
@ -522,7 +522,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 5) AS val, toTypeName(val);
Result:
-``` text
+```response
┌────val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 5))─┐
│ -1.111 │ Nullable(Decimal(9, 5)) │
└────────┴────────────────────────────────────────────────────┘
@ -536,7 +536,7 @@ SELECT toDecimal32OrNull(toString(-1.111), 2) AS val, toTypeName(val);
Result:
-``` text
+```response
┌──val─┬─toTypeName(toDecimal32OrNull(toString(-1.111), 2))─┐
│ ᴺᵁᴸᴸ │ Nullable(Decimal(9, 2)) │
└──────┴────────────────────────────────────────────────────┘
@ -576,7 +576,7 @@ SELECT toDecimal32OrDefault(toString(-1.111), 5) AS val, toTypeName(val);
Result:
-``` text
+```response
┌────val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴───────────────────────────────────────────────────────┘
@ -590,7 +590,7 @@ SELECT toDecimal32OrDefault(toString(-1.111), 2) AS val, toTypeName(val);
Result:
-``` text
+```response
┌─val─┬─toTypeName(toDecimal32OrDefault(toString(-1.111), 2))─┐
│ 0 │ Decimal(9, 2) │
└─────┴───────────────────────────────────────────────────────┘
@ -629,7 +629,7 @@ SELECT toDecimal32OrZero(toString(-1.111), 5) AS val, toTypeName(val);
Result:
-``` text
+```response
┌────val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 5))─┐
│ -1.111 │ Decimal(9, 5) │
└────────┴────────────────────────────────────────────────────┘
@ -643,7 +643,7 @@ SELECT toDecimal32OrZero(toString(-1.111), 2) AS val, toTypeName(val);
Result:
-``` text
+```response
┌──val─┬─toTypeName(toDecimal32OrZero(toString(-1.111), 2))─┐
│ 0.00 │ Decimal(9, 2) │
└──────┴────────────────────────────────────────────────────┘
@ -661,7 +661,7 @@ When converting dates with times to numbers or vice versa, the date with time co
The date and date-with-time formats for the toDate/toDateTime functions are defined as follows:
-``` text
+```response
YYYY-MM-DD
YYYY-MM-DD hh:mm:ss
```
@ -686,7 +686,7 @@ SELECT
Result:
-``` text
+```response
┌───────────now_local─┬─now_yekat───────────┐
│ 2016-06-15 00:11:21 │ 2016-06-15 02:11:21 │
└─────────────────────┴─────────────────────┘
@ -713,7 +713,7 @@ SELECT toFixedString('foo', 8) AS s, toStringCutToZero(s) AS s_cut;
Result:
-``` text
+```response
┌─s─────────────┬─s_cut─┐
│ foo\0\0\0\0\0 │ foo │
└───────────────┴───────┘
@ -727,7 +727,7 @@ SELECT toFixedString('foo\0bar', 8) AS s, toStringCutToZero(s) AS s_cut;
Result:
-``` text
+```response
┌─s──────────┬─s_cut─┐
│ foo\0bar\0 │ foo │
└────────────┴───────┘
@ -755,6 +755,10 @@ This function accepts a number or date or date with time and returns a FixedStri
## reinterpretAsUUID
:::note
In addition to the UUID functions listed here, there is dedicated [UUID function documentation](/docs/en/sql-reference/functions/uuid-functions.md).
:::
Accepts a 16-byte string and returns a UUID containing bytes representing the corresponding value in network byte order (big-endian). If the string isn't long enough, the function works as if the string were padded with the necessary number of null bytes at the end. If the string is longer than 16 bytes, the extra bytes at the end are ignored.
**Syntax**
@ -783,7 +787,7 @@ SELECT reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')));
Result:
-``` text
+```response
┌─reinterpretAsUUID(reverse(unhex('000102030405060708090a0b0c0d0e0f')))─┐
│ 08090a0b-0c0d-0e0f-0001-020304050607 │
└───────────────────────────────────────────────────────────────────────┘
@ -803,7 +807,7 @@ SELECT uuid = uuid2;
Result:
-``` text
+```response
┌─equals(uuid, uuid2)─┐
│ 1 │
└─────────────────────┘
@ -904,7 +908,7 @@ SELECT
Result:
-``` text
+```response
┌─timestamp───────────┬────────────datetime─┬───────date─┬─string──────────────┬─fixed_string──────────────┐
│ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00 │ 2016-06-15 │ 2016-06-15 23:00:00 │ 2016-06-15 23:00:00\0\0\0 │
└─────────────────────┴─────────────────────┴────────────┴─────────────────────┴───────────────────────────┘
@ -924,7 +928,7 @@ SELECT toTypeName(x) FROM t_null;
Result:
-``` text
+```response
┌─toTypeName(x)─┐
│ Int8 │
│ Int8 │
@ -939,7 +943,7 @@ SELECT toTypeName(CAST(x, 'Nullable(UInt16)')) FROM t_null;
Result:
-``` text
+```response
┌─toTypeName(CAST(x, 'Nullable(UInt16)'))─┐
│ Nullable(UInt16) │
│ Nullable(UInt16) │
@ -966,7 +970,7 @@ SELECT cast(-1, 'UInt8') as uint8;
Result:
-``` text
+```response
┌─uint8─┐
│ 255 │
└───────┘
@ -980,7 +984,7 @@ SELECT accurateCast(-1, 'UInt8') as uint8;
Result:
-``` text
+```response
Code: 70. DB::Exception: Received from localhost:9000. DB::Exception: Value in column Int8 cannot be safely converted into type UInt8: While processing accurateCast(-1, 'UInt8') AS uint8.
```
@ -1013,7 +1017,7 @@ SELECT toTypeName(accurateCastOrNull(5, 'UInt8'));
Result:
-``` text
+```response
┌─toTypeName(accurateCastOrNull(5, 'UInt8'))─┐
│ Nullable(UInt8) │
└────────────────────────────────────────────┘
@ -1030,7 +1034,7 @@ SELECT
Result:
-``` text
+```response
┌─uint8─┬─int8─┬─fixed_string─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
└───────┴──────┴──────────────┘
@ -1067,7 +1071,7 @@ SELECT toTypeName(accurateCastOrDefault(5, 'UInt8'));
Result:
-``` text
+```response
┌─toTypeName(accurateCastOrDefault(5, 'UInt8'))─┐
│ UInt8 │
└───────────────────────────────────────────────┘
@ -1087,7 +1091,7 @@ SELECT
Result:
-``` text
+```response
┌─uint8─┬─uint8_default─┬─int8─┬─int8_default─┬─fixed_string─┬─fixed_string_default─┐
│ 0 │ 5 │ 0 │ 5 │ │ Te │
└───────┴───────────────┴──────┴──────────────┴──────────────┴──────────────────────┘
@ -1134,7 +1138,7 @@ SELECT
Result:
-``` text
+```response
┌─plus(date, interval_week)─┬─plus(date, interval_to_week)─┐
│ 2019-01-08 │ 2019-01-08 │
└───────────────────────────┴──────────────────────────────┘
@ -1183,7 +1187,7 @@ AS parseDateTimeBestEffort;
Result:
-``` text
+```response
┌─parseDateTimeBestEffort─┐
│ 2020-10-23 12:12:57 │
└─────────────────────────┘
@ -1198,7 +1202,7 @@ AS parseDateTimeBestEffort;
Result:
-``` text
+```response
┌─parseDateTimeBestEffort─┐
│ 2018-08-18 10:22:16 │
└─────────────────────────┘
@ -1213,7 +1217,7 @@ AS parseDateTimeBestEffort;
Result:
-``` text
+```response
┌─parseDateTimeBestEffort─┐
│ 2015-07-07 12:04:41 │
└─────────────────────────┘
@ -1228,7 +1232,7 @@ AS parseDateTimeBestEffort;
Result:
-``` text
+```response
┌─parseDateTimeBestEffort─┐
│ 2018-10-23 10:12:12 │
└─────────────────────────┘
@ -1242,7 +1246,7 @@ SELECT parseDateTimeBestEffort('10 20:19');
Result:
-``` text
+```response
┌─parseDateTimeBestEffort('10 20:19')─┐
│ 2000-01-10 20:19:00 │
└─────────────────────────────────────┘
@ -1376,7 +1380,7 @@ SELECT toLowCardinality('1');
Result:
-``` text
+```response
┌─toLowCardinality('1')─┐
│ 1 │
└───────────────────────┘
@ -1419,7 +1423,7 @@ SELECT toUnixTimestamp64Milli(dt64);
Result:
-``` text
+```response
┌─toUnixTimestamp64Milli(dt64)─┐
│ 1568650812345 │
└──────────────────────────────┘
@ -1434,7 +1438,7 @@ SELECT toUnixTimestamp64Nano(dt64);
Result:
-``` text
+```response
┌─toUnixTimestamp64Nano(dt64)─┐
│ 1568650812345678000 │
└─────────────────────────────┘
@ -1474,7 +1478,7 @@ SELECT fromUnixTimestamp64Milli(i64, 'UTC');
Result:
-``` text
+```response
┌─fromUnixTimestamp64Milli(i64, 'UTC')─┐
│ 2009-02-13 23:31:31.011 │
└──────────────────────────────────────┘
@ -1510,7 +1514,7 @@ FROM numbers(3);
Result:
-``` text
+```response
┌─formatRow('CSV', number, 'good')─┐
│ 0,"good"
@ -1535,7 +1539,7 @@ SETTINGS format_custom_result_before_delimiter='<prefix>\n', format_custom_resul
Result:
-``` text
+```response
┌─formatRow('CustomSeparated', number, 'good')─┐
<prefix>
0 good
@ -1581,7 +1585,7 @@ FROM numbers(3);
Result:
-``` text
+```response
┌─formatRowNoNewline('CSV', number, 'good')─┐
│ 0,"good" │
│ 1,"good" │
@ -1618,7 +1622,7 @@ SELECT snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC');
Result:
-``` text
+```response
┌─snowflakeToDateTime(CAST('1426860702823350272', 'Int64'), 'UTC')─┐
│ 2021-08-15 10:57:56 │
@ -1654,7 +1658,7 @@ SELECT snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC');
Result:
-``` text
+```response
┌─snowflakeToDateTime64(CAST('1426860802823350272', 'Int64'), 'UTC')─┐
│ 2021-08-15 10:58:19.841 │
@ -1689,7 +1693,7 @@ WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt SELECT dateTimeToS
Result:
-``` text
+```response
┌─dateTimeToSnowflake(dt)─┐
│ 1426860702823350272 │
└─────────────────────────┘
@ -1723,7 +1727,7 @@ WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 SELECT
Result:
-``` text
+```response
┌─dateTime64ToSnowflake(dt64)─┐
│ 1426860704886947840 │
└─────────────────────────────┘


@ -38,7 +38,7 @@ INSERT INTO t_uuid SELECT generateUUIDv4()
SELECT * FROM t_uuid
```
-``` text
+```response
┌────────────────────────────────────x─┐
│ f4bf890f-f9dc-4332-ad5c-0c18e73f28e9 │
└──────────────────────────────────────┘
@ -89,7 +89,7 @@ SELECT empty(generateUUIDv4());
Result:
-```text
+```response
┌─empty(generateUUIDv4())─┐
│ 0 │
└─────────────────────────┘
@ -131,7 +131,7 @@ SELECT notEmpty(generateUUIDv4());
Result:
-```text
+```response
┌─notEmpty(generateUUIDv4())─┐
│ 1 │
└────────────────────────────┘
@ -155,12 +155,56 @@ The UUID type value.
SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') AS uuid
```
-``` text
+```response
┌─────────────────────────────────uuid─┐
│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │
└──────────────────────────────────────┘
```
## toUUIDOrDefault (x,y)
**Syntax**

``` sql
toUUIDOrDefault(String, UUID)
```

**Arguments**

- `string` — String of 36 characters or FixedString(36). [String](../../sql-reference/syntax.md#string).
- `default` — UUID to be used as the default if the first argument cannot be converted to a UUID type. [UUID](/docs/en/sql-reference/data-types/uuid.md).

**Returned value**

The UUID type value.
**Usage examples**
This first example returns the first argument converted to a UUID type as it can be converted:
``` sql
SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID));
```
```response
┌─toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', CAST('59f0c404-5cb3-11e7-907b-a6006ad3dba0', 'UUID'))─┐
│ 61f0c404-5cb3-11e7-907b-a6006ad3dba0 │
└───────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
This second example returns the second argument (the provided default UUID) as the first argument cannot be converted to a UUID type:
```sql
SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID));
```
```response
┌─toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', CAST('59f0c404-5cb3-11e7-907b-a6006ad3dba0', 'UUID'))─┐
│ 59f0c404-5cb3-11e7-907b-a6006ad3dba0 │
└────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
```
## toUUIDOrNull (x)
Takes an argument of type String and tries to parse it into a UUID. If parsing fails, returns NULL.
@ -179,7 +223,7 @@ The Nullable(UUID) type value.
SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
```
``` text
```response
┌─uuid─┐
│ ᴺᵁᴸᴸ │
└──────┘
@ -203,7 +247,7 @@ The UUID type value.
SELECT toUUIDOrZero('61f0c404-5cb3-11e7-907b-a6006ad3dba0T') AS uuid
```
``` text
```response
┌─────────────────────────────────uuid─┐
│ 00000000-0000-0000-0000-000000000000 │
└──────────────────────────────────────┘
@ -236,7 +280,7 @@ SELECT
UUIDStringToNum(uuid) AS bytes
```
``` text
```response
┌─uuid─────────────────────────────────┬─bytes────────────┐
│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ a/<@];!~p{jTj={) │
└──────────────────────────────────────┴──────────────────┘
@ -248,7 +292,7 @@ SELECT
UUIDStringToNum(uuid, 2) AS bytes
```
``` text
```response
┌─uuid─────────────────────────────────┬─bytes────────────┐
│ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │ @</a;]~!p{jTj={) │
└──────────────────────────────────────┴──────────────────┘
@ -281,7 +325,7 @@ SELECT
UUIDNumToString(toFixedString(bytes, 16)) AS uuid
```
``` text
```response
┌─bytes────────────┬─uuid─────────────────────────────────┐
│ a/<@];!~p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │
└──────────────────┴──────────────────────────────────────┘
@ -293,7 +337,7 @@ SELECT
UUIDNumToString(toFixedString(bytes, 16), 2) AS uuid
```
``` text
```response
┌─bytes────────────┬─uuid─────────────────────────────────┐
│ @</a;]~!p{jTj={) │ 612f3c40-5d3b-217e-707b-6a546a3d7b29 │
└──────────────────┴──────────────────────────────────────┘

View File

@ -91,11 +91,11 @@ INSERT INTO t FORMAT TabSeparated
You can insert data separately from the query by using the command-line client or the HTTP interface. For more information, see the section “[Interfaces](../../interfaces)”.
### Constraints
## Constraints
If the table has [constraints](../../sql-reference/statements/create/table.md#constraints), their expressions are checked for each row of inserted data. If any of those constraints is not satisfied, the server raises an exception containing the constraint name and expression, and the query is stopped.
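For illustration, a minimal sketch (the table and constraint names are illustrative, not from this page): a row that fails a `CHECK` constraint aborts the whole `INSERT` with an exception naming the constraint.
```sql
CREATE TABLE t_constrained
(
    id UInt32,
    price Float64,
    CONSTRAINT price_is_positive CHECK price > 0
) ENGINE = MergeTree() ORDER BY id;

INSERT INTO t_constrained VALUES (1, 10.5);   -- passes the constraint check
INSERT INTO t_constrained VALUES (2, -1.0);   -- raises an exception naming `price_is_positive`
```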
### Inserting the Results of `SELECT`
## Inserting the Results of `SELECT`
**Syntax**
@ -114,7 +114,7 @@ However, you can delete old data using `ALTER TABLE ... DROP PARTITION`.
To insert a default value instead of `NULL` into a column with not nullable data type, enable [insert_null_as_default](../../operations/settings/settings.md#insert_null_as_default) setting.
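A minimal sketch of the setting in action (the table name is illustrative):
```sql
SET insert_null_as_default = 1;
CREATE TABLE t_defaults (id UInt32, s String DEFAULT 'none') ENGINE = MergeTree() ORDER BY id;
INSERT INTO t_defaults SELECT 1, NULL;  -- 's' receives its default 'none' instead of the query failing
```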
### Inserting Data from a File
## Inserting Data from a File
**Syntax**
@ -122,14 +122,15 @@ To insert a default value instead of `NULL` into a column with not nullable data
INSERT INTO [db.]table [(c1, c2, c3)] FROM INFILE file_name [COMPRESSION type] FORMAT format_name
```
Use the syntax above to insert data from a file stored on a **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
Use the syntax above to insert data from a file, or files, stored on the **client** side. `file_name` and `type` are string literals. Input file [format](../../interfaces/formats.md) must be set in the `FORMAT` clause.
Compressed files are supported. Compression type is detected by the extension of the file name. Or it can be explicitly specified in a `COMPRESSION` clause. Supported types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.
Compressed files are supported. The compression type is detected by the extension of the file name. Or it can be explicitly specified in a `COMPRESSION` clause. Supported types are: `'none'`, `'gzip'`, `'deflate'`, `'br'`, `'xz'`, `'zstd'`, `'lz4'`, `'bz2'`.
This functionality is available in the [command-line client](../../interfaces/cli.md) and [clickhouse-local](../../operations/utilities/clickhouse-local.md).
**Example**
**Examples**
### Single file with FROM INFILE
Execute the following queries using [command-line client](../../interfaces/cli.md):
```bash
@ -148,7 +149,27 @@ Result:
└────┴──────┘
```
### Inserting into Table Function
### Multiple files with FROM INFILE using globs
This example is similar to the previous one, but it inserts from multiple files using `FROM INFILE 'input_*.csv'`.
```bash
echo 1,A > input_1.csv ; echo 2,B > input_2.csv
clickhouse-client --query="CREATE TABLE infile_globs (id UInt32, text String) ENGINE=MergeTree() ORDER BY id;"
clickhouse-client --query="INSERT INTO infile_globs FROM INFILE 'input_*.csv' FORMAT CSV;"
clickhouse-client --query="SELECT * FROM infile_globs FORMAT PrettyCompact;"
```
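Result (the expected output for the two rows inserted above):
```response
┌─id─┬─text─┐
│  1 │ A    │
│  2 │ B    │
└────┴──────┘
```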
:::tip
In addition to selecting multiple files with `*`, you can use ranges (`{1,2}` or `{1..9}`) and other [glob substitutions](/docs/en/sql-reference/table-functions/file.md/#globs-in-path). All three of the following would work with the above example:
```sql
INSERT INTO infile_globs FROM INFILE 'input_*.csv' FORMAT CSV;
INSERT INTO infile_globs FROM INFILE 'input_{1,2}.csv' FORMAT CSV;
INSERT INTO infile_globs FROM INFILE 'input_?.csv' FORMAT CSV;
```
:::
## Inserting into Table Function
Data can be inserted into tables referenced by [table functions](../../sql-reference/table-functions/index.md).
@ -176,7 +197,7 @@ Result:
└─────┴───────────────────────┘
```
### Performance Considerations
## Performance Considerations
`INSERT` sorts the input data by primary key and splits it into partitions by a partition key. If you insert data into several partitions at once, it can significantly reduce the performance of the `INSERT` query. To avoid this:

View File

@ -523,7 +523,7 @@ CREATE TABLE example_table
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d
TTL d + INTERVAL 1 MONTH [DELETE],
TTL d + INTERVAL 1 MONTH DELETE,
d + INTERVAL 1 WEEK TO VOLUME 'aaa',
d + INTERVAL 2 WEEK TO DISK 'bbb';
```

View File

@ -479,7 +479,7 @@ CREATE TABLE example_table
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d
TTL d + INTERVAL 1 MONTH [DELETE],
TTL d + INTERVAL 1 MONTH DELETE,
d + INTERVAL 1 WEEK TO VOLUME 'aaa',
d + INTERVAL 2 WEEK TO DISK 'bbb';
```

View File

@ -25,8 +25,6 @@
#include <IO/Operators.h>
#include <Poco/AccessExpireCache.h>
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <re2/re2.h>
#include <filesystem>
#include <mutex>
@ -695,14 +693,7 @@ std::shared_ptr<const ContextAccess> AccessControl::getContextAccess(
/// Extract the last entry from comma separated list of X-Forwarded-For addresses.
/// Only the last proxy can be trusted (if any).
Strings forwarded_addresses;
boost::split(forwarded_addresses, client_info.forwarded_for, boost::is_any_of(","));
if (!forwarded_addresses.empty())
{
String & last_forwarded_address = forwarded_addresses.back();
boost::trim(last_forwarded_address);
params.forwarded_address = last_forwarded_address;
}
params.forwarded_address = client_info.getLastForwardedFor();
return getContextAccess(params);
}

View File

@ -580,13 +580,7 @@ if (ENABLE_TESTS)
# gtest framework has substandard code
target_compile_options(unit_tests_dbms PRIVATE
-Wno-zero-as-null-pointer-constant
-Wno-covered-switch-default
-Wno-undef
-Wno-sign-compare
-Wno-used-but-marked-unused
-Wno-missing-noreturn
-Wno-gnu-zero-variadic-macro-arguments
)
target_link_libraries(unit_tests_dbms PRIVATE

View File

@ -109,8 +109,7 @@ public:
template <typename... Args>
[[nodiscard]] bool emplace(Args &&... args)
{
emplaceImpl(std::nullopt /* timeout in milliseconds */, std::forward<Args...>(args...));
return true;
return emplaceImpl(std::nullopt /* timeout in milliseconds */, std::forward<Args...>(args...));
}
/// Returns false if queue is finished and empty

View File

@ -296,7 +296,7 @@ bool KeeperDispatcher::putRequest(const Coordination::ZooKeeperRequestPtr & requ
return true;
}
void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async)
void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async, const MultiVersion<Macros>::Version & macros)
{
LOG_DEBUG(log, "Initializing storage dispatcher");
@ -307,7 +307,7 @@ void KeeperDispatcher::initialize(const Poco::Util::AbstractConfiguration & conf
responses_thread = ThreadFromGlobalPool([this] { responseThread(); });
snapshot_thread = ThreadFromGlobalPool([this] { snapshotThread(); });
snapshot_s3.startup(config);
snapshot_s3.startup(config, macros);
server = std::make_unique<KeeperServer>(configuration_and_settings, config, responses_queue, snapshots_queue, snapshot_s3);
@ -687,7 +687,7 @@ bool KeeperDispatcher::isServerActive() const
return checkInit() && hasLeader() && !server->isRecovering();
}
void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config)
void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
{
auto diff = server->getConfigurationDiff(config);
if (diff.empty())
@ -704,7 +704,7 @@ void KeeperDispatcher::updateConfiguration(const Poco::Util::AbstractConfigurati
throw Exception(ErrorCodes::SYSTEM_ERROR, "Cannot push configuration update to queue");
}
snapshot_s3.updateS3Configuration(config);
snapshot_s3.updateS3Configuration(config, macros);
}
void KeeperDispatcher::updateKeeperStatLatency(uint64_t process_time_ms)

View File

@ -15,6 +15,8 @@
#include <Coordination/Keeper4LWInfo.h>
#include <Coordination/KeeperConnectionStats.h>
#include <Coordination/KeeperSnapshotManagerS3.h>
#include <Common/MultiVersion.h>
#include <Common/Macros.h>
namespace DB
{
@ -109,7 +111,8 @@ public:
/// Initialization from config.
/// standalone_keeper -- we are standalone keeper application (not inside clickhouse server)
void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async);
/// 'macros' are used to substitute macros in the endpoint of disks
void initialize(const Poco::Util::AbstractConfiguration & config, bool standalone_keeper, bool start_async, const MultiVersion<Macros>::Version & macros);
void startServer();
@ -124,7 +127,8 @@ public:
/// Registered in ConfigReloader callback. Add new configuration changes to
/// update_configuration_queue. Keeper Dispatcher apply them asynchronously.
void updateConfiguration(const Poco::Util::AbstractConfiguration & config);
/// 'macros' are used to substitute macros in the endpoint of disks
void updateConfiguration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
/// Shutdown internal keeper parts (server, state machine, log storage, etc)
void shutdown();

View File

@ -14,6 +14,7 @@
#include <IO/S3/PocoHTTPClient.h>
#include <IO/WriteHelpers.h>
#include <IO/copyData.h>
#include <Common/Macros.h>
#include <aws/core/auth/AWSCredentials.h>
#include <aws/s3/S3Client.h>
@ -47,7 +48,7 @@ KeeperSnapshotManagerS3::KeeperSnapshotManagerS3()
, uuid(UUIDHelpers::generateV4())
{}
void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config)
void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
{
try
{
@ -64,7 +65,7 @@ void KeeperSnapshotManagerS3::updateS3Configuration(const Poco::Util::AbstractCo
auto auth_settings = S3::AuthSettings::loadFromConfig(config_prefix, config);
auto endpoint = config.getString(config_prefix + ".endpoint");
String endpoint = macros->expand(config.getString(config_prefix + ".endpoint"));
auto new_uri = S3::URI{endpoint};
{
@ -261,9 +262,9 @@ void KeeperSnapshotManagerS3::uploadSnapshot(const std::string & path, bool asyn
uploadSnapshotImpl(path);
}
void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config)
void KeeperSnapshotManagerS3::startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros)
{
updateS3Configuration(config);
updateS3Configuration(config, macros);
snapshot_s3_thread = ThreadFromGlobalPool([this] { snapshotS3Thread(); });
}

View File

@ -3,6 +3,8 @@
#include "config.h"
#include <Poco/Util/AbstractConfiguration.h>
#include <Common/MultiVersion.h>
#include <Common/Macros.h>
#if USE_AWS_S3
#include <Common/ConcurrentBoundedQueue.h>
@ -21,10 +23,12 @@ class KeeperSnapshotManagerS3
public:
KeeperSnapshotManagerS3();
void updateS3Configuration(const Poco::Util::AbstractConfiguration & config);
/// 'macros' are used to substitute macros in the endpoint of disks
void updateS3Configuration(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
void uploadSnapshot(const std::string & path, bool async_upload = true);
void startup(const Poco::Util::AbstractConfiguration & config);
/// 'macros' are used to substitute macros in the endpoint of disks
void startup(const Poco::Util::AbstractConfiguration & config, const MultiVersion<Macros>::Version & macros);
void shutdown();
private:
using SnapshotS3Queue = ConcurrentBoundedQueue<std::string>;
@ -56,10 +60,10 @@ class KeeperSnapshotManagerS3
public:
KeeperSnapshotManagerS3() = default;
void updateS3Configuration(const Poco::Util::AbstractConfiguration &) {}
void updateS3Configuration(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
void uploadSnapshot(const std::string &, [[maybe_unused]] bool async_upload = true) {}
void startup(const Poco::Util::AbstractConfiguration &) {}
void startup(const Poco::Util::AbstractConfiguration &, const MultiVersion<Macros>::Version &) {}
void shutdown() {}
};

View File

@ -248,7 +248,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
session_id = storage->getSessionID(session_id_request.session_timeout_ms);
LOG_DEBUG(log, "Session ID response {} with timeout {}", session_id, session_id_request.session_timeout_ms);
response->session_id = session_id;
if (!responses_queue.push(response_for_session))
if (!responses_queue.push(response_for_session) && !responses_queue.isFinished())
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
throw Exception(ErrorCodes::SYSTEM_ERROR, "Could not push response with session id {} into responses queue", session_id);
@ -261,7 +261,7 @@ nuraft::ptr<nuraft::buffer> KeeperStateMachine::commit(const uint64_t log_idx, n
KeeperStorage::ResponsesForSessions responses_for_sessions = storage->processRequest(
request_for_session.request, request_for_session.session_id, request_for_session.zxid);
for (auto & response_for_session : responses_for_sessions)
if (!responses_queue.push(response_for_session))
if (!responses_queue.push(response_for_session) && !responses_queue.isFinished())
{
ProfileEvents::increment(ProfileEvents::KeeperCommitsFailed);
throw Exception(
@ -523,7 +523,7 @@ void KeeperStateMachine::processReadRequest(const KeeperStorage::RequestForSessi
true /*check_acl*/,
true /*is_local*/);
for (const auto & response : responses)
if (!responses_queue.push(response))
if (!responses_queue.push(response) && !responses_queue.isFinished())
throw Exception(
ErrorCodes::SYSTEM_ERROR, "Could not push response with session id {} into responses queue", response.session_id);
}

View File

@ -36,7 +36,12 @@ void TinyContext::initializeKeeperDispatcher([[maybe_unused]] bool start_async)
if (config_ref.has("keeper_server"))
{
keeper_dispatcher = std::make_shared<KeeperDispatcher>();
keeper_dispatcher->initialize(config_ref, true, start_async);
MultiVersion<Macros>::Version macros;
if (config_ref.has("macros"))
macros = std::make_unique<Macros>(config_ref, "macros", &Poco::Logger::get("TinyContext"));
keeper_dispatcher->initialize(config_ref, true, start_async, macros);
}
}
@ -71,7 +76,12 @@ void TinyContext::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::A
if (!keeper_dispatcher)
return;
keeper_dispatcher->updateConfiguration(config_);
MultiVersion<Macros>::Version macros;
if (config_.has("macros"))
macros = std::make_unique<Macros>(config_, "macros", &Poco::Logger::get("TinyContext"));
keeper_dispatcher->updateConfiguration(config_, macros);
}
}

View File

@ -5,6 +5,7 @@
#include <Common/Exception.h>
#define MAX_FIXEDSTRING_SIZE 0xFFFFFF
#define MAX_FIXEDSTRING_SIZE_WITHOUT_SUSPICIOUS 256
namespace DB

View File

@ -1,36 +1,39 @@
#include <DataTypes/DataTypeString.h>
#include <Databases/DatabaseReplicated.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Interpreters/Context.h>
#include <Interpreters/executeQuery.h>
#include <Parsers/queryToString.h>
#include <utility>
#include <Backups/IRestoreCoordination.h>
#include <Backups/RestorerFromBackup.h>
#include <base/chrono_io.h>
#include <base/getFQDNOrHostName.h>
#include <Common/Exception.h>
#include <Common/Macros.h>
#include <Common/OpenTelemetryTraceContext.h>
#include <Common/ZooKeeper/KeeperException.h>
#include <Common/ZooKeeper/Types.h>
#include <Common/ZooKeeper/ZooKeeper.h>
#include <Databases/DatabaseReplicated.h>
#include <Databases/DatabaseReplicatedWorker.h>
#include <Interpreters/DDLTask.h>
#include <Interpreters/executeDDLQueryOnCluster.h>
#include <Databases/DDLDependencyVisitor.h>
#include <Databases/TablesDependencyGraph.h>
#include <Interpreters/Cluster.h>
#include <base/getFQDNOrHostName.h>
#include <Interpreters/Context.h>
#include <Interpreters/DDLTask.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Interpreters/executeDDLQueryOnCluster.h>
#include <Interpreters/executeQuery.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/ReadBufferFromString.h>
#include <IO/ReadHelpers.h>
#include <IO/WriteHelpers.h>
#include <Parsers/ASTAlterQuery.h>
#include <Parsers/ASTDropQuery.h>
#include <Parsers/ASTFunction.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/parseQuery.h>
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/evaluateConstantExpression.h>
#include <Parsers/formatAST.h>
#include <Backups/IRestoreCoordination.h>
#include <Backups/RestorerFromBackup.h>
#include <Common/Macros.h>
#include <base/chrono_io.h>
#include <utility>
#include <Parsers/parseQuery.h>
#include <Parsers/ParserCreateQuery.h>
#include <Parsers/queryToString.h>
namespace DB
{
@ -905,31 +908,37 @@ void DatabaseReplicated::recoverLostReplica(const ZooKeeperPtr & current_zookeep
for (const auto & id : dropped_tables)
DatabaseCatalog::instance().waitTableFinallyDropped(id);
/// FIXME: Use proper dependency calculation instead of just moving MV to the end
using NameToMetadata = std::pair<String, String>;
std::vector<NameToMetadata> table_name_to_metadata_sorted;
table_name_to_metadata_sorted.reserve(table_name_to_metadata.size());
std::move(table_name_to_metadata.begin(), table_name_to_metadata.end(), std::back_inserter(table_name_to_metadata_sorted));
std::sort(table_name_to_metadata_sorted.begin(), table_name_to_metadata_sorted.end(), [](const NameToMetadata & lhs, const NameToMetadata & rhs) -> bool
{
const bool is_materialized_view_lhs = lhs.second.find("MATERIALIZED VIEW") != std::string::npos;
const bool is_materialized_view_rhs = rhs.second.find("MATERIALIZED VIEW") != std::string::npos;
return is_materialized_view_lhs < is_materialized_view_rhs;
});
for (const auto & name_and_meta : table_name_to_metadata_sorted)
/// Create all needed tables in a proper order
TablesDependencyGraph tables_dependencies("DatabaseReplicated (" + getDatabaseName() + ")");
for (const auto & [table_name, create_table_query] : table_name_to_metadata)
{
if (isTableExist(name_and_meta.first, getContext()))
/// Note that table_name could contain a dot inside (e.g. .inner.1234-1234-1234-1234)
/// And QualifiedTableName::parseFromString doesn't handle this.
auto qualified_name = QualifiedTableName{.database = getDatabaseName(), .table = table_name};
auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_table_query);
tables_dependencies.addDependencies(qualified_name, getDependenciesFromCreateQuery(getContext(), qualified_name, query_ast));
}
tables_dependencies.checkNoCyclicDependencies();
auto tables_to_create = tables_dependencies.getTablesSortedByDependency();
for (const auto & table_id : tables_to_create)
{
auto table_name = table_id.getTableName();
auto create_query_string = table_name_to_metadata[table_name];
if (isTableExist(table_name, getContext()))
{
assert(name_and_meta.second == readMetadataFile(name_and_meta.first));
assert(create_query_string == readMetadataFile(table_name));
continue;
}
auto query_ast = parseQueryFromMetadataInZooKeeper(name_and_meta.first, name_and_meta.second);
auto query_ast = parseQueryFromMetadataInZooKeeper(table_name, create_query_string);
LOG_INFO(log, "Executing {}", serializeAST(*query_ast));
auto create_query_context = make_query_context();
InterpreterCreateQuery(query_ast, create_query_context).execute();
}
LOG_INFO(log, "All tables are created successfully");
if (max_log_ptr_at_creation != 0)
{

View File

@ -4,6 +4,7 @@
#include <Disks/ObjectStorages/MetadataStorageFromDisk.h>
#include <Disks/DiskFactory.h>
#include <Storages/HDFS/HDFSCommon.h>
#include <Common/Macros.h>
namespace DB
{
@ -22,7 +23,8 @@ void registerDiskHDFS(DiskFactory & factory, bool global_skip_access_check)
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
String uri{config.getString(config_prefix + ".endpoint")};
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
String uri{endpoint};
checkHDFSURL(uri);
if (uri.back() != '/')

View File

@ -1,5 +1,7 @@
#include <Disks/ObjectStorages/S3/S3ObjectStorage.h>
#include <Common/ProfileEvents.h>
#include <Interpreters/Context.h>
#if USE_AWS_S3
@ -31,6 +33,7 @@
#include <Common/StringUtils/StringUtils.h>
#include <Common/logger_useful.h>
#include <Common/MultiVersion.h>
#include <Common/Macros.h>
namespace ProfileEvents
@ -634,10 +637,11 @@ std::unique_ptr<IObjectStorage> S3ObjectStorage::cloneObjectStorage(
{
auto new_s3_settings = getSettings(config, config_prefix, context);
auto new_client = getClient(config, config_prefix, context, *new_s3_settings);
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
return std::make_unique<S3ObjectStorage>(
std::move(new_client), std::move(new_s3_settings),
version_id, s3_capabilities, new_namespace,
config.getString(config_prefix + ".endpoint"));
endpoint);
}
}

View File

@ -21,6 +21,7 @@
#include <Disks/ObjectStorages/S3/ProxyResolverConfiguration.h>
#include <Disks/ObjectStorages/DiskObjectStorageCommon.h>
#include <Disks/DiskLocal.h>
#include <Common/Macros.h>
namespace DB
{
@ -121,7 +122,8 @@ std::unique_ptr<Aws::S3::S3Client> getClient(
settings.request_settings.get_request_throttler,
settings.request_settings.put_request_throttler);
S3::URI uri(config.getString(config_prefix + ".endpoint"));
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
S3::URI uri(endpoint);
if (uri.key.back() != '/')
throw Exception("S3 path must ends with '/', but '" + uri.key + "' doesn't.", ErrorCodes::BAD_ARGUMENTS);

View File

@ -23,6 +23,7 @@
#include <Storages/StorageS3Settings.h>
#include <Core/ServerUUID.h>
#include <Common/Macros.h>
namespace DB
@ -104,7 +105,8 @@ void registerDiskS3(DiskFactory & factory, bool global_skip_access_check)
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
S3::URI uri(config.getString(config_prefix + ".endpoint"));
String endpoint = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
S3::URI uri(endpoint);
if (uri.key.empty())
throw Exception(ErrorCodes::BAD_ARGUMENTS, "No key in S3 uri: {}", uri.uri.toString());

View File

@ -5,6 +5,9 @@
#include <Disks/ObjectStorages/Web/MetadataStorageFromStaticFilesWebServer.h>
#include <Disks/ObjectStorages/DiskObjectStorage.h>
#include <Common/assert_cast.h>
#include <Common/Macros.h>
#include <Interpreters/Context.h>
namespace DB
{
@ -23,7 +26,7 @@ void registerDiskWebServer(DiskFactory & factory, bool global_skip_access_check)
ContextPtr context,
const DisksMap & /*map*/) -> DiskPtr
{
String uri{config.getString(config_prefix + ".endpoint")};
String uri = context->getMacros()->expand(config.getString(config_prefix + ".endpoint"));
bool skip_access_check = global_skip_access_check || config.getBool(config_prefix + ".skip_access_check", false);
if (!uri.ends_with('/'))

View File

@ -432,7 +432,7 @@ String getAdditionalFormatInfoByEscapingRule(const FormatSettings & settings, Fo
settings.json.read_bools_as_numbers,
settings.json.read_objects_as_strings,
settings.json.read_numbers_as_strings,
settings.json.try_infer_objects);
settings.json.allow_object_type);
break;
default:
break;

View File

@ -103,7 +103,7 @@ FormatSettings getFormatSettings(ContextPtr context, const Settings & settings)
format_settings.json.validate_types_from_metadata = settings.input_format_json_validate_types_from_metadata;
format_settings.json.validate_utf8 = settings.output_format_json_validate_utf8;
format_settings.json_object_each_row.column_for_object_name = settings.format_json_object_each_row_column_for_object_name;
format_settings.json.try_infer_objects = context->getSettingsRef().allow_experimental_object_type;
format_settings.json.allow_object_type = context->getSettingsRef().allow_experimental_object_type;
format_settings.null_as_default = settings.input_format_null_as_default;
format_settings.decimal_trailing_zeros = settings.output_format_decimal_trailing_zeros;
format_settings.parquet.row_group_size = settings.output_format_parquet_row_group_size;

View File

@ -161,7 +161,7 @@ struct FormatSettings
bool try_infer_numbers_from_strings = false;
bool validate_types_from_metadata = true;
bool validate_utf8 = false;
bool try_infer_objects = false;
bool allow_object_type = false;
} json;
struct

View File

@ -366,7 +366,7 @@ namespace
transformJSONTuplesAndArraysToArrays(data_types, settings, type_indexes, json_info);
/// Convert Maps to Objects if needed.
if (settings.json.try_infer_objects)
if (settings.json.allow_object_type)
transformMapsAndObjectsToObjects(data_types, type_indexes);
if (settings.json.read_objects_as_strings)
@ -716,7 +716,7 @@ namespace
{
if constexpr (is_json)
{
if (settings.json.try_infer_objects)
if (settings.json.allow_object_type)
return std::make_shared<DataTypeObject>("json", true);
}
/// Empty Map is Map(Nothing, Nothing)
@ -735,7 +735,7 @@ namespace
transformInferredTypesIfNeededImpl<is_json>(value_types, settings, json_info);
if (!checkIfTypesAreEqual(value_types))
{
if (settings.json.try_infer_objects)
if (settings.json.allow_object_type)
return std::make_shared<DataTypeObject>("json", true);
if (settings.json.read_objects_as_strings)
return std::make_shared<DataTypeString>();

View File

@ -1,5 +1,6 @@
#pragma once
#include <Functions/FunctionsConversion.h>
#include <Interpreters/parseColumnsListForTableFunction.h>
namespace DB
{
@ -32,10 +33,11 @@ public:
ColumnNumbers getArgumentsThatAreAlwaysConstant() const override { return {1}; }
explicit CastOverloadResolverImpl(std::optional<Diagnostic> diagnostic_, bool keep_nullable_, bool cast_ipv4_ipv6_default_on_conversion_error_)
explicit CastOverloadResolverImpl(std::optional<Diagnostic> diagnostic_, bool keep_nullable_, bool cast_ipv4_ipv6_default_on_conversion_error_, const DataTypeValidationSettings & data_type_validation_settings_)
: diagnostic(std::move(diagnostic_))
, keep_nullable(keep_nullable_)
, cast_ipv4_ipv6_default_on_conversion_error(cast_ipv4_ipv6_default_on_conversion_error_)
, data_type_validation_settings(data_type_validation_settings_)
{
}
@ -46,13 +48,13 @@ public:
if constexpr (internal)
return createImpl({}, false /*keep_nullable*/, settings_ref.cast_ipv4_ipv6_default_on_conversion_error);
return createImpl({}, settings_ref.cast_keep_nullable, settings_ref.cast_ipv4_ipv6_default_on_conversion_error);
return createImpl({}, settings_ref.cast_keep_nullable, settings_ref.cast_ipv4_ipv6_default_on_conversion_error, DataTypeValidationSettings(settings_ref));
}
static FunctionOverloadResolverPtr createImpl(std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false, bool cast_ipv4_ipv6_default_on_conversion_error = false)
static FunctionOverloadResolverPtr createImpl(std::optional<Diagnostic> diagnostic = {}, bool keep_nullable = false, bool cast_ipv4_ipv6_default_on_conversion_error = false, const DataTypeValidationSettings & data_type_validation_settings = {})
{
assert(!internal || !keep_nullable);
return std::make_unique<CastOverloadResolverImpl>(std::move(diagnostic), keep_nullable, cast_ipv4_ipv6_default_on_conversion_error);
return std::make_unique<CastOverloadResolverImpl>(std::move(diagnostic), keep_nullable, cast_ipv4_ipv6_default_on_conversion_error, data_type_validation_settings);
}
protected:
@ -83,6 +85,7 @@ protected:
ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT);
DataTypePtr type = DataTypeFactory::instance().get(type_col->getValue<String>());
validateDataType(type, data_type_validation_settings);
if constexpr (cast_type == CastType::accurateOrNull)
return makeNullable(type);
@ -104,6 +107,7 @@ private:
std::optional<Diagnostic> diagnostic;
bool keep_nullable;
bool cast_ipv4_ipv6_default_on_conversion_error;
DataTypeValidationSettings data_type_validation_settings;
};

View File

@ -177,7 +177,7 @@ namespace
bool checkRequestCanReturn2xxAndErrorInBody(Aws::Http::HttpRequest & request)
{
auto query_params = request.GetQueryStringParameters();
if (request.HasHeader("z-amz-copy-source"))
if (request.HasHeader("x-amz-copy-source"))
{
/// CopyObject https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
if (query_params.empty())

View File

@ -149,13 +149,12 @@ class AWSEC2MetadataClient : public Aws::Internal::AWSHttpResourceClient
static constexpr char EC2_IMDS_TOKEN_TTL_DEFAULT_VALUE[] = "21600";
static constexpr char EC2_IMDS_TOKEN_TTL_HEADER[] = "x-aws-ec2-metadata-token-ttl-seconds";
static constexpr char EC2_DEFAULT_METADATA_ENDPOINT[] = "http://169.254.169.254";
public:
/// See EC2MetadataClient.
explicit AWSEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration)
explicit AWSEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration, const char * endpoint_)
: Aws::Internal::AWSHttpResourceClient(client_configuration)
, endpoint(endpoint_)
, logger(&Poco::Logger::get("AWSEC2InstanceProfileConfigLoader"))
{
}
@ -180,7 +179,7 @@ public:
{
std::lock_guard locker(token_mutex);
LOG_TRACE(logger, "Getting default credentials for EC2 instance.");
LOG_TRACE(logger, "Getting default credentials for ec2 instance from {}", endpoint);
auto result = GetResourceWithAWSWebServiceResult(endpoint.c_str(), EC2_SECURITY_CREDENTIALS_RESOURCE, nullptr);
credentials_string = result.GetPayload();
if (result.GetResponseCode() == Aws::Http::HttpResponseCode::UNAUTHORIZED)
@ -286,12 +285,50 @@ public:
}
private:
const Aws::String endpoint = EC2_DEFAULT_METADATA_ENDPOINT;
const Aws::String endpoint;
mutable std::recursive_mutex token_mutex;
mutable Aws::String token;
Poco::Logger * logger;
};
std::shared_ptr<AWSEC2MetadataClient> InitEC2MetadataClient(const Aws::Client::ClientConfiguration & client_configuration)
{
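/// Endpoint selection: an explicitly set AWS_EC2_METADATA_SERVICE_ENDPOINT takes precedence;
/// otherwise AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE ("ipv4" or "ipv6") picks the default endpoint for that protocol.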
Aws::String ec2_metadata_service_endpoint = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT");
auto * logger = &Poco::Logger::get("AWSEC2InstanceProfileConfigLoader");
if (ec2_metadata_service_endpoint.empty())
{
Aws::String ec2_metadata_service_endpoint_mode = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE");
if (ec2_metadata_service_endpoint_mode.length() == 0)
{
ec2_metadata_service_endpoint = "http://169.254.169.254"; //default to IPv4 default endpoint
}
else
{
if (ec2_metadata_service_endpoint_mode.length() == 4)
{
if (Aws::Utils::StringUtils::CaselessCompare(ec2_metadata_service_endpoint_mode.c_str(), "ipv4"))
{
ec2_metadata_service_endpoint = "http://169.254.169.254"; //default to IPv4 default endpoint
}
else if (Aws::Utils::StringUtils::CaselessCompare(ec2_metadata_service_endpoint_mode.c_str(), "ipv6"))
{
ec2_metadata_service_endpoint = "http://[fd00:ec2::254]";
}
else
{
LOG_ERROR(logger, "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE can only be set to ipv4 or ipv6, received: {}", ec2_metadata_service_endpoint_mode);
}
}
else
{
LOG_ERROR(logger, "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE can only be set to ipv4 or ipv6, received: {}", ec2_metadata_service_endpoint_mode);
}
}
}
LOG_INFO(logger, "Using IMDS endpoint: {}", ec2_metadata_service_endpoint);
return std::make_shared<AWSEC2MetadataClient>(client_configuration, ec2_metadata_service_endpoint.c_str());
}
class AWSEC2InstanceProfileConfigLoader : public Aws::Config::AWSProfileConfigLoader
{
public:
@ -646,7 +683,7 @@ public:
aws_client_configuration.retryStrategy = std::make_shared<Aws::Client::DefaultRetryStrategy>(1, 1000);
auto ec2_metadata_client = std::make_shared<AWSEC2MetadataClient>(aws_client_configuration);
auto ec2_metadata_client = InitEC2MetadataClient(aws_client_configuration);
auto config_loader = std::make_shared<AWSEC2InstanceProfileConfigLoader>(ec2_metadata_client, !use_insecure_imds_request);
AddProvider(std::make_shared<AWSInstanceProfileCredentialsProvider>(config_loader));

View File

@ -4,6 +4,7 @@
#include <Poco/Net/SocketAddress.h>
#include <base/types.h>
#include <Common/OpenTelemetryTraceContext.h>
#include <boost/algorithm/string/trim.hpp>
namespace DB
{
@ -101,6 +102,14 @@ public:
/// The element can be trusted only if you trust the corresponding proxy.
/// NOTE This field can also be reused in future for TCP interface with PROXY v1/v2 protocols.
String forwarded_for;
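/// Returns the last entry of the comma-separated X-Forwarded-For list (the only entry that can be trusted, if any), with surrounding whitespace trimmed.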
String getLastForwardedFor() const
{
if (forwarded_for.empty())
return {};
String last = forwarded_for.substr(forwarded_for.find_last_of(',') + 1);
boost::trim(last);
return last;
}
/// Common
String quota_key;

View File

@ -2356,7 +2356,7 @@ void Context::initializeKeeperDispatcher([[maybe_unused]] bool start_async) cons
}
shared->keeper_dispatcher = std::make_shared<KeeperDispatcher>();
shared->keeper_dispatcher->initialize(config, is_standalone_app, start_async);
shared->keeper_dispatcher->initialize(config, is_standalone_app, start_async, getMacros());
}
#endif
}
@ -2398,7 +2398,7 @@ void Context::updateKeeperConfiguration([[maybe_unused]] const Poco::Util::Abstr
if (!shared->keeper_dispatcher)
return;
shared->keeper_dispatcher->updateConfiguration(config);
shared->keeper_dispatcher->updateConfiguration(config, getMacros());
#endif
}

View File

@ -76,8 +76,6 @@
#include <Functions/UserDefined/UserDefinedSQLFunctionVisitor.h>
#define MAX_FIXEDSTRING_SIZE_WITHOUT_SUSPICIOUS 256
namespace DB
{

View File

@ -4,6 +4,9 @@
#include <Interpreters/InterpreterCreateQuery.h>
#include <Interpreters/Context.h>
#include <Interpreters/parseColumnsListForTableFunction.h>
#include <DataTypes/DataTypeLowCardinality.h>
#include <DataTypes/DataTypeFixedString.h>
#include <DataTypes/DataTypeNullable.h>
namespace DB
@ -12,9 +15,66 @@ namespace DB
namespace ErrorCodes
{
extern const int LOGICAL_ERROR;
extern const int SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY;
extern const int ILLEGAL_COLUMN;
}
ColumnsDescription parseColumnsListFromString(const std::string & structure, ContextPtr context)
void validateDataType(const DataTypePtr & type, const DataTypeValidationSettings & settings)
{
if (!settings.allow_suspicious_low_cardinality_types)
{
if (const auto * lc_type = typeid_cast<const DataTypeLowCardinality *>(type.get()))
{
if (!isStringOrFixedString(*removeNullable(lc_type->getDictionaryType())))
throw Exception(
ErrorCodes::SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY,
"Creating columns of type {} is prohibited by default due to expected negative impact on performance. "
"It can be enabled with the \"allow_suspicious_low_cardinality_types\" setting.",
lc_type->getName());
}
}
if (!settings.allow_experimental_geo_types)
{
const auto & type_name = type->getName();
if (type_name == "MultiPolygon" || type_name == "Polygon" || type_name == "Ring" || type_name == "Point")
{
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Cannot create column with type '{}' because experimental geo types are not allowed. Set setting "
"allow_experimental_geo_types = 1 in order to allow it", type_name);
}
}
if (!settings.allow_experimental_object_type)
{
if (type->hasDynamicSubcolumns())
{
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Cannot create column with type '{}' because experimental Object type is not allowed. "
"Set setting allow_experimental_object_type = 1 in order to allow it", type->getName());
}
}
if (!settings.allow_suspicious_fixed_string_types)
{
auto basic_type = removeLowCardinality(removeNullable(type));
if (const auto * fixed_string = typeid_cast<const DataTypeFixedString *>(basic_type.get()))
{
if (fixed_string->getN() > MAX_FIXEDSTRING_SIZE_WITHOUT_SUSPICIOUS)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Cannot create column with type '{}' because fixed string with size > {} is suspicious. "
"Set setting allow_suspicious_fixed_string_types = 1 in order to allow it",
type->getName(),
MAX_FIXEDSTRING_SIZE_WITHOUT_SUSPICIOUS);
}
}
}
ColumnsDescription parseColumnsListFromString(const std::string & structure, const ContextPtr & context)
{
ParserColumnDeclarationList parser(true, true);
const Settings & settings = context->getSettingsRef();
@ -25,10 +85,14 @@ ColumnsDescription parseColumnsListFromString(const std::string & structure, Con
if (!columns_list)
throw Exception("Could not cast AST to ASTExpressionList", ErrorCodes::LOGICAL_ERROR);
return InterpreterCreateQuery::getColumnsDescription(*columns_list, context, false);
auto columns = InterpreterCreateQuery::getColumnsDescription(*columns_list, context, false);
auto validation_settings = DataTypeValidationSettings(context->getSettingsRef());
for (const auto & [name, type] : columns.getAll())
validateDataType(type, validation_settings);
return columns;
}
bool tryParseColumnsListFromString(const std::string & structure, ColumnsDescription & columns, ContextPtr context)
bool tryParseColumnsListFromString(const std::string & structure, ColumnsDescription & columns, const ContextPtr & context)
{
ParserColumnDeclarationList parser(true, true);
const Settings & settings = context->getSettingsRef();
@ -47,6 +111,9 @@ bool tryParseColumnsListFromString(const std::string & structure, ColumnsDescrip
try
{
columns = InterpreterCreateQuery::getColumnsDescription(*columns_list, context, false);
auto validation_settings = DataTypeValidationSettings(context->getSettingsRef());
for (const auto & [name, type] : columns.getAll())
validateDataType(type, validation_settings);
return true;
}
catch (...)

View File

@ -2,6 +2,7 @@
#include <string>
#include <Storages/ColumnsDescription.h>
#include <Core/Settings.h>
namespace DB
@ -9,9 +10,29 @@ namespace DB
class Context;
/// Parses a common argument for table functions such as table structure given in string
ColumnsDescription parseColumnsListFromString(const std::string & structure, ContextPtr context);
struct DataTypeValidationSettings
{
DataTypeValidationSettings() = default;
bool tryParseColumnsListFromString(const std::string & structure, ColumnsDescription & columns, ContextPtr context);
explicit DataTypeValidationSettings(const Settings & settings)
: allow_suspicious_low_cardinality_types(settings.allow_suspicious_low_cardinality_types)
, allow_experimental_geo_types(settings.allow_experimental_geo_types)
, allow_experimental_object_type(settings.allow_experimental_object_type)
, allow_suspicious_fixed_string_types(settings.allow_suspicious_fixed_string_types)
{
}
bool allow_suspicious_low_cardinality_types = true;
bool allow_experimental_geo_types = true;
bool allow_experimental_object_type = true;
bool allow_suspicious_fixed_string_types = true;
};
void validateDataType(const DataTypePtr & type, const DataTypeValidationSettings & settings);
/// Parses a common argument for table functions such as table structure given in string
ColumnsDescription parseColumnsListFromString(const std::string & structure, const ContextPtr & context);
bool tryParseColumnsListFromString(const std::string & structure, ColumnsDescription & columns, const ContextPtr & context);
}

View File

@ -35,16 +35,10 @@ void CSVRowOutputFormat::writePrefix()
const auto & sample = getPort(PortKind::Main).getHeader();
if (with_names)
{
writeLine(sample.getNames());
writeRowBetweenDelimiter();
}
if (with_types)
{
writeLine(sample.getDataTypeNames());
writeRowBetweenDelimiter();
}
}
@ -60,38 +54,21 @@ void CSVRowOutputFormat::writeFieldDelimiter()
}
void CSVRowOutputFormat::writeRowBetweenDelimiter()
void CSVRowOutputFormat::writeRowEndDelimiter()
{
if (format_settings.csv.crlf_end_of_line)
writeChar('\r', out);
writeChar('\n', out);
}
void CSVRowOutputFormat::writeSuffix()
{
/// Write '\n' after data if we had any data.
if (haveWrittenData())
writeRowBetweenDelimiter();
}
void CSVRowOutputFormat::writeBeforeTotals()
{
writeRowBetweenDelimiter();
writeChar('\n', out);
}
void CSVRowOutputFormat::writeBeforeExtremes()
{
writeRowBetweenDelimiter();
}
void CSVRowOutputFormat::writeAfterTotals()
{
writeRowBetweenDelimiter();
}
void CSVRowOutputFormat::writeAfterExtremes()
{
writeRowBetweenDelimiter();
writeChar('\n', out);
}

View File

@ -33,18 +33,15 @@ public:
private:
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
void writeFieldDelimiter() override;
void writeRowBetweenDelimiter() override;
void writeRowEndDelimiter() override;
bool supportTotals() const override { return true; }
bool supportExtremes() const override { return true; }
void writeBeforeTotals() override;
void writeAfterTotals() override;
void writeBeforeExtremes() override;
void writeAfterExtremes() override;
void writePrefix() override;
void writeSuffix() override;
void writeLine(const std::vector<String> & values);
bool with_names;

View File

@ -12,6 +12,7 @@ namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int INCORRECT_DATA;
extern const int ILLEGAL_COLUMN;
}
JSONAsRowInputFormat::JSONAsRowInputFormat(const Block & header_, ReadBuffer & in_, Params params_)
@ -206,6 +207,15 @@ void JSONAsObjectRowInputFormat::readJSONObject(IColumn & column)
serializations[0]->deserializeTextJSON(column, *buf, format_settings);
}
JSONAsObjectExternalSchemaReader::JSONAsObjectExternalSchemaReader(const FormatSettings & settings)
{
if (!settings.json.allow_object_type)
throw Exception(
ErrorCodes::ILLEGAL_COLUMN,
"Cannot infer the data structure in JSONAsObject format because experimental Object type is not allowed. Set setting "
"allow_experimental_object_type = 1 in order to allow it");
}
void registerInputFormatJSONAsString(FormatFactory & factory)
{
factory.registerInputFormat("JSONAsString", [](
@ -260,9 +270,9 @@ void registerFileSegmentationEngineJSONAsObject(FormatFactory & factory)
void registerJSONAsObjectSchemaReader(FormatFactory & factory)
{
factory.registerExternalSchemaReader("JSONAsObject", [](const FormatSettings &)
factory.registerExternalSchemaReader("JSONAsObject", [](const FormatSettings & settings)
{
return std::make_shared<JSONAsObjectExternalSchemaReader>();
return std::make_shared<JSONAsObjectExternalSchemaReader>(settings);
});
}

View File

@ -77,6 +77,8 @@ public:
class JSONAsObjectExternalSchemaReader : public IExternalSchemaReader
{
public:
JSONAsObjectExternalSchemaReader(const FormatSettings & settings);
NamesAndTypesList readSchema() override
{
return {{"json", std::make_shared<DataTypeObject>("json", false)}};

View File

@ -52,17 +52,12 @@ void JSONCompactEachRowRowOutputFormat::writeRowStartDelimiter()
void JSONCompactEachRowRowOutputFormat::writeRowEndDelimiter()
{
writeChar(']', *ostr);
}
void JSONCompactEachRowRowOutputFormat::writeRowBetweenDelimiter()
{
writeChar('\n', *ostr);
writeCString("]\n", *ostr);
}
void JSONCompactEachRowRowOutputFormat::writeTotals(const Columns & columns, size_t row_num)
{
writeRowBetweenDelimiter();
writeChar('\n', *ostr);
size_t columns_size = columns.size();
writeRowStartDelimiter();
for (size_t i = 0; i < columns_size; ++i)
@ -73,7 +68,6 @@ void JSONCompactEachRowRowOutputFormat::writeTotals(const Columns & columns, siz
writeField(*columns[i], *serializations[i], row_num);
}
writeRowEndDelimiter();
writeRowBetweenDelimiter();
}
void JSONCompactEachRowRowOutputFormat::writeLine(const std::vector<String> & values)
@ -96,22 +90,10 @@ void JSONCompactEachRowRowOutputFormat::writePrefix()
const auto & header = getPort(PortKind::Main).getHeader();
if (with_names)
{
writeLine(JSONUtils::makeNamesValidJSONStrings(header.getNames(), settings, settings.json.validate_utf8));
writeRowBetweenDelimiter();
}
if (with_types)
{
writeLine(JSONUtils::makeNamesValidJSONStrings(header.getDataTypeNames(), settings, settings.json.validate_utf8));
writeRowBetweenDelimiter();
}
}
void JSONCompactEachRowRowOutputFormat::writeSuffix()
{
if (haveWrittenData())
writeChar('\n', *ostr);
}
void JSONCompactEachRowRowOutputFormat::consumeTotals(DB::Chunk chunk)

View File

@ -26,7 +26,6 @@ public:
private:
void writePrefix() override;
void writeSuffix() override;
void writeTotals(const Columns & columns, size_t row_num) override;
@ -34,7 +33,6 @@ private:
void writeFieldDelimiter() override;
void writeRowStartDelimiter() override;
void writeRowEndDelimiter() override;
void writeRowBetweenDelimiter() override;
bool supportTotals() const override { return true; }
void consumeTotals(Chunk) override;

View File

@ -41,7 +41,10 @@ void JSONEachRowRowOutputFormat::writeRowStartDelimiter()
void JSONEachRowRowOutputFormat::writeRowEndDelimiter()
{
writeCString("}", *ostr);
if (settings.json.array_of_rows)
writeChar('}', *ostr);
else
writeCString("}\n", *ostr);
field_number = 0;
}
@ -49,9 +52,7 @@ void JSONEachRowRowOutputFormat::writeRowEndDelimiter()
void JSONEachRowRowOutputFormat::writeRowBetweenDelimiter()
{
if (settings.json.array_of_rows)
writeChar(',', *ostr);
writeChar('\n', *ostr);
writeCString(",\n", *ostr);
}
@ -68,8 +69,6 @@ void JSONEachRowRowOutputFormat::writeSuffix()
{
if (settings.json.array_of_rows)
writeCString("\n]\n", *ostr);
else if (haveWrittenData())
writeChar('\n', *ostr);
}

View File

@ -10,16 +10,13 @@ namespace DB
void JSONEachRowWithProgressRowOutputFormat::writeRowStartDelimiter()
{
if (has_progress)
{
writeProgress();
writeRowBetweenDelimiter();
}
writeCString("{\"row\":{", *ostr);
}
void JSONEachRowWithProgressRowOutputFormat::writeRowEndDelimiter()
{
writeCString("}}", *ostr);
writeCString("}}\n", *ostr);
field_number = 0;
}
@ -30,7 +27,7 @@ void JSONEachRowWithProgressRowOutputFormat::onProgress(const Progress & value)
WriteBufferFromString buf(progress_line);
writeCString("{\"progress\":", buf);
progress.writeJSON(buf);
writeCString("}", buf);
writeCString("}\n", buf);
buf.finalize();
std::lock_guard lock(progress_lines_mutex);
progress_lines.emplace_back(std::move(progress_line));
@ -40,33 +37,22 @@ void JSONEachRowWithProgressRowOutputFormat::onProgress(const Progress & value)
void JSONEachRowWithProgressRowOutputFormat::flush()
{
if (has_progress)
{
if (haveWrittenData())
writeRowBetweenDelimiter();
writeProgress();
}
JSONEachRowRowOutputFormat::flush();
}
void JSONEachRowWithProgressRowOutputFormat::writeSuffix()
{
if (has_progress)
{
writeRowBetweenDelimiter();
writeProgress();
}
JSONEachRowRowOutputFormat::writeSuffix();
}
void JSONEachRowWithProgressRowOutputFormat::writeProgress()
{
std::lock_guard lock(progress_lines_mutex);
for (size_t i = 0; i != progress_lines.size(); ++i)
{
if (i != 0)
writeRowBetweenDelimiter();
writeString(progress_lines[i], *ostr);
}
for (const auto & progress_line : progress_lines)
writeString(progress_line, *ostr);
progress_lines.clear();
has_progress = false;
}

View File

@ -47,18 +47,7 @@ void MarkdownRowOutputFormat::writeFieldDelimiter()
void MarkdownRowOutputFormat::writeRowEndDelimiter()
{
writeCString(" |", out);
}
void MarkdownRowOutputFormat::writeRowBetweenDelimiter()
{
writeChar('\n', out);
}
void MarkdownRowOutputFormat::writeSuffix()
{
if (haveWrittenData())
writeChar('\n', out);
writeCString(" |\n", out);
}
void MarkdownRowOutputFormat::writeField(const IColumn & column, const ISerialization & serialization, size_t row_num)

View File

@ -21,7 +21,6 @@ private:
/// |columnName1|columnName2|...|columnNameN|
/// |:-:|:-:|...|:-:|
void writePrefix() override;
void writeSuffix() override;
/// Write '|' before each row
void writeRowStartDelimiter() override;
@ -29,12 +28,9 @@ private:
/// Write '|' between values
void writeFieldDelimiter() override;
/// Write '|' at the end of each row
/// Write '|\n' at the end of each row
void writeRowEndDelimiter() override;
/// Write '\n' after each row
void writeRowBetweenDelimiter() override;
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
const FormatSettings format_settings;

View File

@ -14,11 +14,6 @@
namespace DB
{
namespace ErrorCodes
{
}
PrettyBlockOutputFormat::PrettyBlockOutputFormat(
WriteBuffer & out_, const Block & header_, const FormatSettings & format_settings_, bool mono_block_)
: IOutputFormat(header_, out_), format_settings(format_settings_), serializations(header_.getSerializations()), mono_block(mono_block_)
@ -37,8 +32,8 @@ void PrettyBlockOutputFormat::calculateWidths(
{
size_t num_rows = std::min(chunk.getNumRows(), format_settings.pretty.max_rows);
/// len(num_rows) + len(". ")
row_number_width = static_cast<size_t>(std::floor(std::log10(num_rows))) + 3;
/// len(num_rows + total_rows) + len(". ")
row_number_width = static_cast<size_t>(std::floor(std::log10(num_rows + total_rows))) + 3;
size_t num_columns = chunk.getNumColumns();
const auto & columns = chunk.getColumns();
@ -295,7 +290,7 @@ void PrettyBlockOutputFormat::writeChunk(const Chunk & chunk, PortKind port_kind
if (format_settings.pretty.output_format_pretty_row_numbers)
{
// Write row number;
auto row_num_string = std::to_string(i + 1) + ". ";
auto row_num_string = std::to_string(i + 1 + total_rows) + ". ";
for (size_t j = 0; j < row_number_width - row_num_string.size(); ++j)
{
writeCString(" ", out);

View File

@ -144,7 +144,7 @@ void PrettyCompactBlockOutputFormat::writeRow(
if (format_settings.pretty.output_format_pretty_row_numbers)
{
// Write row number;
auto row_num_string = std::to_string(row_num + 1) + ". ";
auto row_num_string = std::to_string(row_num + 1 + total_rows) + ". ";
for (size_t i = 0; i < row_number_width - row_num_string.size(); ++i)
{
writeCString(" ", out);

View File

@ -73,7 +73,7 @@ void PrettySpaceBlockOutputFormat::writeChunk(const Chunk & chunk, PortKind port
if (format_settings.pretty.output_format_pretty_row_numbers)
{
// Write row number;
auto row_num_string = std::to_string(row + 1) + ". ";
auto row_num_string = std::to_string(row + 1 + total_rows) + ". ";
for (size_t i = 0; i < row_number_width - row_num_string.size(); ++i)
{
writeCString(" ", out);

View File

@ -65,27 +65,24 @@ void SQLInsertRowOutputFormat::writeRowEndDelimiter()
{
writeChar(')', out);
++rows_in_line;
if (rows_in_line >= format_settings.sql_insert.max_batch_size)
{
writeChar(';', out);
rows_in_line = 0;
}
}
void SQLInsertRowOutputFormat::writeRowBetweenDelimiter()
{
if (rows_in_line == 0)
writeChar('\n', out);
if (rows_in_line >= format_settings.sql_insert.max_batch_size)
{
writeCString(";\n", out);
rows_in_line = 0;
}
else
{
writeCString(", ", out);
}
}
void SQLInsertRowOutputFormat::writeSuffix()
{
if (rows_in_line != 0)
writeChar(';', out);
if (haveWrittenData())
writeChar('\n', out);
writeCString(";\n", out);
}
void SQLInsertRowOutputFormat::resetFormatterImpl()

View File

@ -29,14 +29,9 @@ void TSKVRowOutputFormat::writeField(const IColumn & column, const ISerializatio
void TSKVRowOutputFormat::writeRowEndDelimiter()
{
field_number = 0;
}
void TSKVRowOutputFormat::writeRowBetweenDelimiter()
{
writeChar('\n', out);
field_number = 0;
}

View File

@ -21,7 +21,6 @@ public:
private:
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
void writeRowEndDelimiter() override;
void writeRowBetweenDelimiter() override;
/// Disable totals and extremes, because they are enabled in TSV.
bool supportTotals() const override { return false; }

View File

@ -36,16 +36,10 @@ void TabSeparatedRowOutputFormat::writePrefix()
const auto & header = getPort(PortKind::Main).getHeader();
if (with_names)
{
writeLine(header.getNames());
writeRowBetweenDelimiter();
}
if (with_types)
{
writeLine(header.getDataTypeNames());
writeRowBetweenDelimiter();
}
}
@ -64,38 +58,21 @@ void TabSeparatedRowOutputFormat::writeFieldDelimiter()
}
void TabSeparatedRowOutputFormat::writeRowBetweenDelimiter()
void TabSeparatedRowOutputFormat::writeRowEndDelimiter()
{
if (format_settings.tsv.crlf_end_of_line)
writeChar('\r', out);
writeChar('\n', out);
}
void TabSeparatedRowOutputFormat::writeSuffix()
{
/// Output '\n' at the end of data if we had any data.
if (haveWrittenData())
writeRowBetweenDelimiter();
}
void TabSeparatedRowOutputFormat::writeBeforeTotals()
{
writeRowBetweenDelimiter();
writeChar('\n', out);
}
void TabSeparatedRowOutputFormat::writeBeforeExtremes()
{
writeRowBetweenDelimiter();
}
void TabSeparatedRowOutputFormat::writeAfterTotals()
{
writeRowBetweenDelimiter();
}
void TabSeparatedRowOutputFormat::writeAfterExtremes()
{
writeRowBetweenDelimiter();
writeChar('\n', out);
}
void registerOutputFormatTabSeparated(FormatFactory & factory)

View File

@ -35,18 +35,15 @@ public:
protected:
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
void writeFieldDelimiter() override final;
void writeRowBetweenDelimiter() override;
void writeRowEndDelimiter() override;
bool supportTotals() const override { return true; }
bool supportExtremes() const override { return true; }
void writeBeforeTotals() override final;
void writeAfterTotals() override final;
void writeBeforeExtremes() override final;
void writeAfterExtremes() override final;
void writePrefix() override;
void writeSuffix() override;
void writeLine(const std::vector<String> & values);
bool with_names;

View File

@ -89,20 +89,14 @@ void XMLRowOutputFormat::writeRowStartDelimiter()
void XMLRowOutputFormat::writeRowEndDelimiter()
{
writeCString("\t\t</row>", *ostr);
writeCString("\t\t</row>\n", *ostr);
field_number = 0;
++row_count;
}
void XMLRowOutputFormat::writeRowBetweenDelimiter()
{
writeChar('\n', *ostr);
}
void XMLRowOutputFormat::writeSuffix()
{
writeCString("\n\t</data>\n", *ostr);
writeCString("\t</data>\n", *ostr);
}
@ -152,7 +146,7 @@ void XMLRowOutputFormat::writeMaxExtreme(const Columns & columns, size_t row_num
void XMLRowOutputFormat::writeAfterExtremes()
{
writeCString("\n\t</extremes>\n", *ostr);
writeCString("\t</extremes>\n", *ostr);
}
void XMLRowOutputFormat::writeExtremesElement(const char * title, const Columns & columns, size_t row_num)
@ -179,7 +173,7 @@ void XMLRowOutputFormat::writeExtremesElement(const char * title, const Columns
writeCString("\t\t</", *ostr);
writeCString(title, *ostr);
writeCString(">", *ostr);
writeCString(">\n", *ostr);
}

View File

@ -24,7 +24,6 @@ private:
void writeField(const IColumn & column, const ISerialization & serialization, size_t row_num) override;
void writeRowStartDelimiter() override;
void writeRowEndDelimiter() override;
void writeRowBetweenDelimiter() override;
void writePrefix() override;
void writeSuffix() override;
void finalizeImpl() override;

View File

@ -34,6 +34,9 @@ void HTTPServerConnection::run()
Poco::Timestamp now;
if (!forwarded_for.empty())
request.set("X-Forwarded-For", forwarded_for);
if (request.isSecure())
{
size_t hsts_max_age = context->getMaxHstsAge();

View File

@ -21,6 +21,18 @@ public:
Poco::Net::HTTPServerParams::Ptr params,
HTTPRequestHandlerFactoryPtr factory);
HTTPServerConnection(
HTTPContextPtr context_,
TCPServer & tcp_server_,
const Poco::Net::StreamSocket & socket_,
Poco::Net::HTTPServerParams::Ptr params_,
HTTPRequestHandlerFactoryPtr factory_,
const String & forwarded_for_)
: HTTPServerConnection(context_, tcp_server_, socket_, params_, factory_)
{
forwarded_for = forwarded_for_;
}
void run() override;
protected:
@ -31,6 +43,7 @@ private:
TCPServer & tcp_server;
Poco::Net::HTTPServerParams::Ptr params;
HTTPRequestHandlerFactoryPtr factory;
String forwarded_for;
bool stopped;
std::mutex mutex; // guards the |factory| with assumption that creating handlers is not thread-safe.
};

View File

@ -16,4 +16,9 @@ Poco::Net::TCPServerConnection * HTTPServerConnectionFactory::createConnection(c
return new HTTPServerConnection(context, tcp_server, socket, params, factory);
}
Poco::Net::TCPServerConnection * HTTPServerConnectionFactory::createConnection(const Poco::Net::StreamSocket & socket, TCPServer & tcp_server, TCPProtocolStackData & stack_data)
{
return new HTTPServerConnection(context, tcp_server, socket, params, factory, stack_data.forwarded_for);
}
}
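This overload is the glue in a chain that carries a PROXY-protocol client address across protocol layers: ProxyV1Handler (two hunks below) fills stack_data.forwarded_for from the PROXY header, this factory hands it to the new HTTPServerConnection constructor, and run() (above) republishes it as an X-Forwarded-For request header for HTTPHandler to consume. The shape of TCPProtocolStackData is an assumption inferred from usage in these hunks; the real struct presumably carries more members:

#include <string>

// Assumed minimal shape of TCPProtocolStackData (only the fields used in
// the hunks shown here).
struct TCPProtocolStackData
{
    std::string forwarded_for;     // filled by ProxyV1Handler from the PROXY header
    std::string default_database;  // consumed by TCPHandler further below
};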
View File
@ -15,6 +15,7 @@ public:
HTTPServerConnectionFactory(HTTPContextPtr context, Poco::Net::HTTPServerParams::Ptr params, HTTPRequestHandlerFactoryPtr factory);
Poco::Net::TCPServerConnection * createConnection(const Poco::Net::StreamSocket & socket, TCPServer & tcp_server) override;
Poco::Net::TCPServerConnection * createConnection(const Poco::Net::StreamSocket & socket, TCPServer & tcp_server, TCPProtocolStackData & stack_data) override;
private:
HTTPContextPtr context;
View File
@ -41,6 +41,7 @@
#include <Poco/MemoryStream.h>
#include <Poco/StreamCopier.h>
#include <Poco/String.h>
#include <Poco/Net/SocketAddress.h>
#include <chrono>
#include <sstream>
@ -469,9 +470,15 @@ bool HTTPHandler::authenticateUser(
client_info.forwarded_for = request.get("X-Forwarded-For", "");
client_info.quota_key = quota_key;
/// Extract the last entry from the comma-separated list of forwarded_for addresses.
/// Only the last proxy can be trusted (if any).
String forwarded_address = client_info.getLastForwardedFor();
try
{
session->authenticate(*request_credentials, request.clientAddress());
if (!forwarded_address.empty() && server.config().getBool("auth_use_forwarded_address", false))
session->authenticate(*request_credentials, Poco::Net::SocketAddress(forwarded_address, request.clientAddress().port()));
else
session->authenticate(*request_credentials, request.clientAddress());
}
catch (const Authentication::Require<BasicCredentials> & required_credentials)
{
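getLastForwardedFor() itself is not shown in this diff; a plausible reading, given the comment above, is that it returns the last, whitespace-trimmed element of the comma-separated X-Forwarded-For list. A sketch under that assumption (not the actual ClickHouse implementation):

#include <string>

// Hypothetical equivalent of ClientInfo::getLastForwardedFor():
// "client, proxy1, proxy2" -> "proxy2".
std::string lastForwardedFor(const std::string & forwarded_for)
{
    const auto comma = forwarded_for.find_last_of(',');
    std::string last = (comma == std::string::npos)
        ? forwarded_for
        : forwarded_for.substr(comma + 1);
    const auto begin = last.find_first_not_of(" \t");
    if (begin == std::string::npos)
        return {};
    const auto end = last.find_last_not_of(" \t");
    return last.substr(begin, end - begin + 1);
}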
View File
@ -60,6 +60,9 @@ void ProxyV1Handler::run()
// read port and "\r\n"
if (!readWord(5, word, eol) || !eol)
throw ParsingException("PROXY protocol violation", ErrorCodes::CANNOT_PARSE_INPUT_ASSERTION_FAILED);
if (!stack_data.forwarded_for.empty())
LOG_TRACE(log, "Forwarded client address from PROXY header: {}", stack_data.forwarded_for);
}
bool ProxyV1Handler::readWord(int max_len, std::string & word, bool & eol)
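For reference, a PROXY protocol v1 header is a single text line sent before any application data; the client address that ends up in stack_data.forwarded_for comes from its second field. An illustrative header (addresses and ports invented):

PROXY TCP4 203.0.113.7 192.0.2.10 56324 9000\r\n

The fields are: protocol family, source address, destination address, source port, destination port.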
View File
@ -3,6 +3,7 @@
#include <Poco/Net/TCPServerConnection.h>
#include <Server/IServer.h>
#include <Server/TCPProtocolStackData.h>
#include <Common/logger_useful.h>
namespace DB
@ -13,7 +14,7 @@ class ProxyV1Handler : public Poco::Net::TCPServerConnection
using StreamSocket = Poco::Net::StreamSocket;
public:
explicit ProxyV1Handler(const StreamSocket & socket, IServer & server_, const std::string & conf_name_, TCPProtocolStackData & stack_data_)
: Poco::Net::TCPServerConnection(socket), server(server_), conf_name(conf_name_), stack_data(stack_data_) {}
: Poco::Net::TCPServerConnection(socket), log(&Poco::Logger::get("ProxyV1Handler")), server(server_), conf_name(conf_name_), stack_data(stack_data_) {}
void run() override;
@ -21,6 +22,7 @@ protected:
bool readWord(int max_len, std::string & word, bool & eol);
private:
Poco::Logger * log;
IServer & server;
std::string conf_name;
TCPProtocolStackData & stack_data;
View File
@ -9,6 +9,7 @@
#include <base/types.h>
#include <base/scope_guard.h>
#include <Poco/Net/NetException.h>
#include <Poco/Net/SocketAddress.h>
#include <Poco/Util/LayeredConfiguration.h>
#include <Common/CurrentThread.h>
#include <Common/Stopwatch.h>
@ -120,6 +121,8 @@ TCPHandler::TCPHandler(IServer & server_, TCPServer & tcp_server_, const Poco::N
, default_database(stack_data.default_database)
, server_display_name(std::move(server_display_name_))
{
if (!forwarded_for.empty())
LOG_TRACE(log, "Forwarded client address: {}", forwarded_for);
}
TCPHandler::~TCPHandler()
@ -1156,7 +1159,15 @@ void TCPHandler::receiveHello()
}
session = makeSession();
session->authenticate(user, password, socket().peerAddress());
auto & client_info = session->getClientInfo();
/// Extract the last entry from the comma-separated list of forwarded_for addresses.
/// Only the last proxy can be trusted (if any).
String forwarded_address = client_info.getLastForwardedFor();
if (!forwarded_address.empty() && server.config().getBool("auth_use_forwarded_address", false))
session->authenticate(user, password, Poco::Net::SocketAddress(forwarded_address, socket().peerAddress().port()));
else
session->authenticate(user, password, socket().peerAddress());
}
void TCPHandler::receiveAddendum()
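The selection logic here mirrors the HTTPHandler hunk above. Condensed into one helper, it amounts to something like the following restatement (not code from the diff):

#include <Poco/Net/SocketAddress.h>
#include <string>

// Pick the address to authenticate against: the last forwarded hop if the
// server is configured to trust it, otherwise the actual peer address.
Poco::Net::SocketAddress effectiveClientAddress(
    const std::string & forwarded,
    const Poco::Net::SocketAddress & peer,
    bool trust_forwarded)
{
    if (trust_forwarded && !forwarded.empty())
        return Poco::Net::SocketAddress(forwarded, peer.port());  // keep the peer's port
    return peer;
}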
View File
@ -54,9 +54,11 @@ private:
Poco::Logger * log;
UVLoop loop;
/// Preserve the order of destruction here:
/// the connection and the handler must be destroyed before the loop above.
RabbitMQHandler event_handler;
std::unique_ptr<AMQP::TcpConnection> connection;
std::mutex mutex;
};
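The comment relies on a core C++ guarantee: non-static data members are destroyed in reverse declaration order, so declaring event_handler and connection after loop ensures they die before it. A minimal standalone illustration (not ClickHouse code):

#include <cstdio>

struct Loop    { ~Loop()    { std::puts("loop destroyed last"); } };
struct Handler { ~Handler() { std::puts("handler destroyed first"); } };

struct Owner
{
    Loop loop;        // declared first  => destroyed last
    Handler handler;  // declared second => destroyed first
};

int main() { Owner o; }  // prints "handler destroyed first", then "loop destroyed last"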
View File
@ -621,7 +621,6 @@ void StorageRabbitMQ::prepareChannelForConsumer(RabbitMQConsumerPtr consumer)
consumer->setupChannel();
}
void StorageRabbitMQ::unbindExchange()
{
/* This is needed because RabbitMQ (without special adjustments) can't, for example, properly create a materialized view if there was an insert query
@ -812,6 +811,8 @@ void StorageRabbitMQ::shutdown()
{
shutdown_called = true;
LOG_TRACE(log, "Deactivating background tasks");
/// In case it has not yet been able to set up the connection.
deactivateTask(connection_task, true, false);
@ -820,6 +821,8 @@ void StorageRabbitMQ::shutdown()
deactivateTask(streaming_task, true, false);
deactivateTask(looping_task, true, true);
LOG_TRACE(log, "Cleaning up RabbitMQ after table usage");
/// Just a paranoid try/catch; it is not actually needed.
try
{
@ -842,6 +845,8 @@ void StorageRabbitMQ::shutdown()
{
tryLogCurrentException(log);
}
LOG_TRACE(log, "Shutdown finished");
}
View File
@ -6,6 +6,7 @@
#include <boost/noncopyable.hpp>
#include <Common/Exception.h>
#include <Common/logger_useful.h>
namespace DB
{
@ -30,7 +31,30 @@ public:
~UVLoop()
{
if (loop_ptr)
uv_loop_close(loop_ptr.get());
{
auto res = uv_loop_close(loop_ptr.get());
if (res == UV_EBUSY)
{
LOG_DEBUG(log, "Closing pending handles");
uv_walk(loop_ptr.get(), onUVWalkClosingCallback, nullptr);
/// Run the loop until there are no pending callbacks.
while ((res = uv_run(loop_ptr.get(), UV_RUN_ONCE)) != 0)
{
LOG_DEBUG(log, "Waiting for pending callbacks to finish ({})", res);
}
res = uv_loop_close(loop_ptr.get());
if (res == UV_EBUSY)
{
LOG_ERROR(
log, "Failed to close libuv loop (active requests/handles in the loop: {})",
uv_loop_alive(loop_ptr.get()));
chassert(false);
}
}
}
}
inline uv_loop_t * getLoop() { return loop_ptr.get(); }
@ -39,6 +63,15 @@ public:
private:
std::unique_ptr<uv_loop_t> loop_ptr;
Poco::Logger * log = &Poco::Logger::get("UVLoop");
static void onUVWalkClosingCallback(uv_handle_t * handle, void *)
{
if (!uv_is_closing(handle))
uv_close(handle, onUVCloseCallback);
}
static void onUVCloseCallback(uv_handle_t *) {}
};
}
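The destructor above follows the standard libuv shutdown recipe: if uv_loop_close reports UV_EBUSY, walk the loop closing every handle, run the loop until no callbacks remain, then close again. The same pattern in a standalone program (assumes libuv is available; error handling elided, and in this toy program the loop is already empty, so the branch is purely illustrative):

#include <uv.h>

static void close_walk_cb(uv_handle_t * handle, void *)
{
    if (!uv_is_closing(handle))
        uv_close(handle, nullptr);  // close_cb may be null
}

int main()
{
    uv_loop_t loop;
    uv_loop_init(&loop);

    if (uv_loop_close(&loop) == UV_EBUSY)
    {
        uv_walk(&loop, close_walk_cb, nullptr);
        while (uv_run(&loop, UV_RUN_ONCE) != 0)
            ;  // keep running until all close callbacks have fired
        uv_loop_close(&loop);
    }
}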
View File
@ -77,7 +77,7 @@ def main(args):
pr_info.number,
pr_info.sha,
test_results,
[],
args.status,
check_name_with_group,
)
View File
@ -203,6 +203,7 @@ if __name__ == "__main__":
temp_path = TEMP_PATH
repo_path = REPO_COPY
reports_path = REPORTS_PATH
post_commit_path = os.path.join(temp_path, "functional_commit_status.tsv")
args = parse_args()
check_name = args.check_name
@ -227,7 +228,7 @@ if __name__ == "__main__":
if validate_bugfix_check and "pr-bugfix" not in pr_info.labels:
if args.post_commit_status == "file":
post_commit_status_to_file(
os.path.join(temp_path, "post_commit_status.tsv"),
post_commit_path,
f"Skipped (no pr-bugfix in {pr_info.labels})",
"success",
"null",
@ -264,9 +265,11 @@ if __name__ == "__main__":
state=state,
)
elif args.post_commit_status == "file":
fpath = os.path.join(temp_path, "post_commit_status.tsv")
post_commit_status_to_file(
fpath, description=NO_CHANGES_MSG, state=state, report_url="null"
post_commit_path,
description=NO_CHANGES_MSG,
state=state,
report_url="null",
)
sys.exit(0)
@ -348,7 +351,7 @@ if __name__ == "__main__":
)
elif args.post_commit_status == "file":
post_commit_status_to_file(
os.path.join(temp_path, "post_commit_status.tsv"),
post_commit_path,
description,
state,
report_url,
View File
@ -148,6 +148,7 @@ if __name__ == "__main__":
stopwatch = Stopwatch()
temp_path = TEMP_PATH
post_commit_path = os.path.join(temp_path, "integration_commit_status.tsv")
repo_path = REPO_COPY
reports_path = REPORTS_PATH
@ -180,7 +181,7 @@ if __name__ == "__main__":
if validate_bugfix_check and "pr-bugfix" not in pr_info.labels:
if args.post_commit_status == "file":
post_commit_status_to_file(
os.path.join(temp_path, "post_commit_status.tsv"),
post_commit_path,
f"Skipped (no pr-bugfix in {pr_info.labels})",
"success",
"null",
@ -275,7 +276,7 @@ if __name__ == "__main__":
)
elif args.post_commit_status == "file":
post_commit_status_to_file(
os.path.join(temp_path, "post_commit_status.tsv"),
post_commit_path,
description,
state,
report_url,
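Taken together with the functional-test hunk above, each check now writes its status to a distinctly named file (functional_commit_status.tsv versus integration_commit_status.tsv), presumably so a downstream consumer can read both without the two checks clobbering a shared post_commit_status.tsv.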
View File
@ -236,7 +236,7 @@ if __name__ == "__main__":
# TODO: Remove me, always green mode for the first time, unless errors
status = "success"
if "errors" in message:
if "errors" in message.lower():
status = "failure"
# TODO: Remove until here
except Exception:
View File
@ -275,7 +275,6 @@ tr:hover td {{filter: brightness(95%);}}
<th>Compiler</th>
<th>Build type</th>
<th>Sanitizer</th>
<th>Libraries</th>
<th>Status</th>
<th>Build log</th>
<th>Build time</th>
@ -319,8 +318,6 @@ def create_build_html_report(
else:
row += "<td>none</td>"
row += f"<td>{build_result.libraries}</td>"
if build_result.status:
style = _get_status_style(build_result.status)
row += f'<td style="{style}">{build_result.status}</td>'
View File
@ -1,4 +1,5 @@
<clickhouse>
<auth_use_forwarded_address>true</auth_use_forwarded_address>
<!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
<openSSL>
<server> <!-- Used for https server AND secure tcp port -->
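With auth_use_forwarded_address enabled as above, a request arriving with, say, "X-Forwarded-For: 10.0.0.5, 203.0.113.9" would be authenticated against 203.0.113.9 (the last, most trusted hop) rather than against the proxy's socket address, matching the HTTPHandler and TCPHandler hunks earlier in this diff.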
Some files were not shown because too many files have changed in this diff.