Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-22 07:31:57 +00:00)

Commit a612080df1: Merge branch 'master' into database_atomic_improvements
@@ -287,7 +287,7 @@ endif ()
 
 include(cmake/dbms_glob_sources.cmake)
 
-if (OS_LINUX)
+if (OS_LINUX OR OS_ANDROID)
     include(cmake/linux/default_libs.cmake)
 elseif (OS_DARWIN)
     include(cmake/darwin/default_libs.cmake)
@@ -16,5 +16,4 @@ ClickHouse is an open-source column-oriented database management system that all
 
 ## Upcoming Events
 
-* [ClickHouse virtual office hours](https://www.eventbrite.com/e/clickhouse-july-virtual-meetup-tickets-111199787558) on July 15, 2020.
 * [ClickHouse at ByteDance (in Chinese)](https://mp.weixin.qq.com/s/Em-HjPylO8D7WPui4RREAQ) on July 17, 2020.
@@ -1,6 +1,9 @@
 #include <common/getThreadId.h>
 
-#if defined(OS_LINUX)
+#if defined(OS_ANDROID)
+    #include <sys/types.h>
+    #include <unistd.h>
+#elif defined(OS_LINUX)
     #include <unistd.h>
     #include <syscall.h>
 #elif defined(OS_FREEBSD)
@@ -16,7 +19,9 @@ uint64_t getThreadId()
 {
     if (!current_tid)
     {
-#if defined(OS_LINUX)
+#if defined(OS_ANDROID)
+        current_tid = gettid();
+#elif defined(OS_LINUX)
         current_tid = syscall(SYS_gettid); /// This call is always successful. - man gettid
 #elif defined(OS_FREEBSD)
         current_tid = pthread_getthreadid_np();
@@ -9,7 +9,6 @@
 #include <string.h>
 #include <signal.h>
 #include <cxxabi.h>
-#include <execinfo.h>
 #include <unistd.h>
 
 #include <typeinfo>
@@ -7,7 +7,7 @@
 #
 # Sets values of:
 # OPENLDAP_FOUND - TRUE if found
-# OPENLDAP_INCLUDE_DIR - path to the include directory
+# OPENLDAP_INCLUDE_DIRS - paths to the include directories
 # OPENLDAP_LIBRARIES - paths to the libldap and liblber libraries
 # OPENLDAP_LDAP_LIBRARY - paths to the libldap library
 # OPENLDAP_LBER_LIBRARY - paths to the liblber library
@@ -28,11 +28,11 @@ if(OPENLDAP_USE_REENTRANT_LIBS)
 endif()
 
 if(OPENLDAP_ROOT_DIR)
-    find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
+    find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "include" NO_DEFAULT_PATH)
     find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
     find_library(OPENLDAP_LBER_LIBRARY NAMES "lber" PATHS "${OPENLDAP_ROOT_DIR}" PATH_SUFFIXES "lib" NO_DEFAULT_PATH)
 else()
-    find_path(OPENLDAP_INCLUDE_DIR NAMES "ldap.h" "lber.h")
+    find_path(OPENLDAP_INCLUDE_DIRS NAMES "ldap.h" "lber.h")
     find_library(OPENLDAP_LDAP_LIBRARY NAMES "ldap${_r_suffix}")
     find_library(OPENLDAP_LBER_LIBRARY NAMES "lber")
 endif()
@@ -44,10 +44,10 @@ set(OPENLDAP_LIBRARIES ${OPENLDAP_LDAP_LIBRARY} ${OPENLDAP_LBER_LIBRARY})
 include(FindPackageHandleStandardArgs)
 find_package_handle_standard_args(
     OpenLDAP DEFAULT_MSG
-    OPENLDAP_INCLUDE_DIR OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
+    OPENLDAP_INCLUDE_DIRS OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY
 )
 
-mark_as_advanced(OPENLDAP_INCLUDE_DIR OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
+mark_as_advanced(OPENLDAP_INCLUDE_DIRS OPENLDAP_LIBRARIES OPENLDAP_LDAP_LIBRARY OPENLDAP_LBER_LIBRARY)
 
 if(OPENLDAP_USE_STATIC_LIBS)
     set(CMAKE_FIND_LIBRARY_SUFFIXES ${_orig_CMAKE_FIND_LIBRARY_SUFFIXES})
@@ -1,4 +1,5 @@
-SET(ENABLE_AMQPCPP ${ENABLE_LIBRARIES})
+option(ENABLE_AMQPCPP "Enable AMQP-CPP" ${ENABLE_LIBRARIES})
+
 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/AMQP-CPP/CMakeLists.txt")
     message (WARNING "submodule contrib/AMQP-CPP is missing. to fix try run: \n git submodule update --init --recursive")
     set (ENABLE_AMQPCPP 0)
@@ -1,3 +1,7 @@
+option (ENABLE_GTEST_LIBRARY "Enable gtest library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_GTEST_LIBRARY)
+
 option (USE_INTERNAL_GTEST_LIBRARY "Set to FALSE to use system Google Test instead of bundled" ${NOT_UNBUNDLED})
 
 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/googletest/googletest/CMakeLists.txt")
@@ -28,4 +32,6 @@ if((GTEST_INCLUDE_DIRS AND GTEST_BOTH_LIBRARIES) OR GTEST_SRC_DIR)
     set(USE_GTEST 1)
 endif()
 
+endif()
+
 message (STATUS "Using gtest=${USE_GTEST}: ${GTEST_INCLUDE_DIRS} : ${GTEST_BOTH_LIBRARIES} : ${GTEST_SRC_DIR}")
@@ -16,11 +16,16 @@ if (ENABLE_LDAP)
     set (OPENLDAP_USE_REENTRANT_LIBS 1)
 
     if (NOT USE_INTERNAL_LDAP_LIBRARY)
-        if (APPLE AND NOT OPENLDAP_ROOT_DIR)
-            set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
-        endif ()
+        if (OPENLDAP_USE_STATIC_LIBS)
+            message (WARNING "Unable to use external static OpenLDAP libraries, falling back to the bundled version.")
+            set (USE_INTERNAL_LDAP_LIBRARY 1)
+        else ()
+            if (APPLE AND NOT OPENLDAP_ROOT_DIR)
+                set (OPENLDAP_ROOT_DIR "/usr/local/opt/openldap")
+            endif ()
 
         find_package (OpenLDAP)
+        endif ()
     endif ()
 
     if (NOT OPENLDAP_FOUND AND NOT MISSING_INTERNAL_LDAP_LIBRARY)
@@ -54,7 +59,10 @@ if (ENABLE_LDAP)
     else ()
         set (USE_INTERNAL_LDAP_LIBRARY 1)
         set (OPENLDAP_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap")
-        set (OPENLDAP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/openldap/include")
+        set (OPENLDAP_INCLUDE_DIRS
+            "${ClickHouse_SOURCE_DIR}/contrib/openldap-cmake/${_system_name}_${_system_processor}/include"
+            "${ClickHouse_SOURCE_DIR}/contrib/openldap/include"
+        )
         # Below, 'ldap'/'ldap_r' and 'lber' will be resolved to
         # the targets defined in contrib/openldap-cmake/CMakeLists.txt
         if (OPENLDAP_USE_REENTRANT_LIBS)
@@ -73,4 +81,4 @@ if (ENABLE_LDAP)
     endif ()
 endif ()
 
-message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIR} : ${OPENLDAP_LIBRARIES}")
+message (STATUS "Using ldap=${USE_LDAP}: ${OPENLDAP_INCLUDE_DIRS} : ${OPENLDAP_LIBRARIES}")
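Taken together, the FindOpenLDAP.cmake and LDAP find-module hunks above mean that asking for static external OpenLDAP libraries now falls back to the bundled copy instead of failing. As a rough, hedged illustration, a configure run that hits the new path might look like the sketch below; the build-directory layout is an assumption, while the cache variable names are exactly the ones in the hunks.

```bash
# Sketch only: ENABLE_LDAP, USE_INTERNAL_LDAP_LIBRARY and OPENLDAP_USE_STATIC_LIBS
# are the cache variables seen in the hunks above; paths are illustrative.
mkdir -p build && cd build
cmake .. -DENABLE_LDAP=1 -DUSE_INTERNAL_LDAP_LIBRARY=0 -DOPENLDAP_USE_STATIC_LIBS=1
# Expected in the configure output, per the message() call added above:
#   Unable to use external static OpenLDAP libraries, falling back to the bundled version.
```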
@@ -1,3 +1,7 @@
+option(ENABLE_GSASL_LIBRARY "Enable gsasl library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_GSASL_LIBRARY)
+
 option (USE_INTERNAL_LIBGSASL_LIBRARY "Set to FALSE to use system libgsasl library instead of bundled" ${NOT_UNBUNDLED})
 
 if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libgsasl/src/gsasl.h")
@@ -24,4 +28,6 @@ if(LIBGSASL_LIBRARY AND LIBGSASL_INCLUDE_DIR)
     set (USE_LIBGSASL 1)
 endif()
 
+endif()
+
 message (STATUS "Using libgsasl=${USE_LIBGSASL}: ${LIBGSASL_INCLUDE_DIR} : ${LIBGSASL_LIBRARY}")
@@ -1,3 +1,7 @@
+option (ENABLE_MSGPACK "Enable msgpack library" ${ENABLE_LIBRARIES})
+
+if (ENABLE_MSGPACK)
+
 option (USE_INTERNAL_MSGPACK_LIBRARY "Set to FALSE to use system msgpack library instead of bundled" ${NOT_UNBUNDLED})
 
 if (USE_INTERNAL_MSGPACK_LIBRARY)
@@ -14,4 +18,10 @@ else()
     find_path(MSGPACK_INCLUDE_DIR NAMES msgpack.hpp PATHS ${MSGPACK_INCLUDE_PATHS})
 endif()
 
-message(STATUS "Using msgpack: ${MSGPACK_INCLUDE_DIR}")
+if (MSGPACK_INCLUDE_DIR)
+    set(USE_MSGPACK 1)
+endif()
+
+endif()
+
+message(STATUS "Using msgpack=${USE_MSGPACK}: ${MSGPACK_INCLUDE_DIR}")
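The gtest, gsasl and msgpack hunks above put each library behind a top-level option(), matching the ENABLE_AMQPCPP pattern. A hedged sketch of turning them off from the command line; the values are illustrative, the option names are the ones declared above.

```bash
# Sketch: disable the optional libraries introduced as option() flags above.
cmake .. \
    -DENABLE_AMQPCPP=0 \
    -DENABLE_GTEST_LIBRARY=0 \
    -DENABLE_GSASL_LIBRARY=0 \
    -DENABLE_MSGPACK=0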
@@ -11,7 +11,12 @@ else ()
     set (BUILTINS_LIBRARY "-lgcc")
 endif ()
 
+if (OS_ANDROID)
+    # pthread and rt are included in libc
+    set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -ldl")
+else ()
 set (DEFAULT_LIBS "${DEFAULT_LIBS} ${BUILTINS_LIBRARY} ${COVERAGE_OPTION} -lc -lm -lrt -lpthread -ldl")
+endif ()
 
 message(STATUS "Default libraries: ${DEFAULT_LIBS}")
 
@@ -35,7 +40,11 @@ add_library(global-libs INTERFACE)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
-add_subdirectory(base/glibc-compatibility)
+if (NOT OS_ANDROID)
+    # Our compatibility layer doesn't build under Android, many errors in musl.
+    add_subdirectory(base/glibc-compatibility)
+endif ()
 
 include (cmake/find/unwind.cmake)
 include (cmake/find/cxx.cmake)
+
@@ -1,6 +1,11 @@
 if (CMAKE_SYSTEM_NAME MATCHES "Linux")
     set (OS_LINUX 1)
     add_definitions(-D OS_LINUX)
+elseif (CMAKE_SYSTEM_NAME MATCHES "Android")
+    # This is a toy configuration and not in CI, so expect it to be broken.
+    # Use cmake flags such as: -DCMAKE_TOOLCHAIN_FILE=~/ch2/android-ndk-r21d/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=28
+    set (OS_ANDROID 1)
+    add_definitions(-D OS_ANDROID)
 elseif (CMAKE_SYSTEM_NAME MATCHES "FreeBSD")
     set (OS_FREEBSD 1)
     add_definitions(-D OS_FREEBSD)
@@ -17,7 +22,7 @@ if (CMAKE_CROSSCOMPILING)
         set (ENABLE_PARQUET OFF CACHE INTERNAL "")
         set (ENABLE_ICU OFF CACHE INTERNAL "")
         set (ENABLE_FASTOPS OFF CACHE INTERNAL "")
-    elseif (OS_LINUX)
+    elseif (OS_LINUX OR OS_ANDROID)
         if (ARCH_AARCH64)
             # FIXME: broken dependencies
             set (ENABLE_PROTOBUF OFF CACHE INTERNAL "")
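The two hunks above introduce OS_ANDROID as an explicitly toy, non-CI target. As a hedged sketch, a cross-compile configure using the flags quoted in the comment of the OS-detection hunk might look like the following; the NDK path is the example path from that comment and will differ on a real machine.

```bash
# Sketch only: toolchain path, ABI and platform level are copied from the comment
# added in the OS-detection hunk above; adjust them to the local NDK installation.
mkdir -p build-android && cd build-android
cmake .. \
    -DCMAKE_TOOLCHAIN_FILE=~/ch2/android-ndk-r21d/build/cmake/android.toolchain.cmake \
    -DANDROID_ABI=arm64-v8a \
    -DANDROID_PLATFORM=28
```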
@@ -22,7 +22,7 @@ elseif (COMPILER_CLANG)
     if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS ${APPLE_CLANG_MINIMUM_VERSION})
         message (FATAL_ERROR "AppleClang compiler version must be at least ${APPLE_CLANG_MINIMUM_VERSION} (Xcode ${XCODE_MINIMUM_VERSION}).")
     elseif (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 11.0.0)
-        # char8_t is available staring (upstream vanilla) Clang 7, but prior to Clang 8,
+        # char8_t is available starting (upstream vanilla) Clang 7, but prior to Clang 8,
         # it is not enabled by -std=c++20 and can be enabled with an explicit -fchar8_t.
         set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fchar8_t")
         set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fchar8_t")
contrib/CMakeLists.txt (vendored, 2 changes)
@@ -102,7 +102,7 @@ if (USE_INTERNAL_SSL_LIBRARY)
     add_library(OpenSSL::SSL ALIAS ${OPENSSL_SSL_LIBRARY})
 endif ()
 
-if (ENABLE_LDAP AND USE_INTERNAL_LDAP_LIBRARY)
+if (USE_INTERNAL_LDAP_LIBRARY)
     add_subdirectory (openldap-cmake)
 endif ()
 
@@ -88,6 +88,10 @@
         "name": "yandex/clickhouse-testflows-runner",
         "dependent": []
     },
+    "docker/test/fasttest": {
+        "name": "yandex/clickhouse-fasttest",
+        "dependent": []
+    },
     "docker/test/integration/s3_proxy": {
         "name": "yandex/clickhouse-s3-proxy",
         "dependent": []
@@ -96,4 +100,5 @@
         "name": "yandex/clickhouse-python-bottle",
         "dependent": []
     }
+
 }
@@ -33,6 +33,24 @@ then
     rm /output/clickhouse-odbc-bridge ||:
 
     cp -r ../docker/test/performance-comparison /output/scripts ||:
+
+    # We have to know the revision that corresponds to this binary build.
+    # It is not the nominal SHA from pull/*/head, but the pull/*/merge, which is
+    # head merged to master by github, at some point after the PR is updated.
+    # There are some quirks to consider:
+    # - apparently the real SHA is not recorded in system.build_options;
+    # - it can change at any time as github pleases, so we can't just record
+    #   the SHA and use it later, it might become inaccessible;
+    # - CI has an immutable snapshot of repository that it uses for all checks
+    #   for a given nominal SHA, but it is not accessible outside Yandex.
+    # This is why we add this repository snapshot from CI to the performance test
+    # package.
+    mkdir /output/ch
+    git -C /output/ch init --bare
+    git -C /output/ch remote add origin /build
+    git -C /output/ch fetch --no-tags --depth 50 origin HEAD
+    git -C /output/ch reset --soft FETCH_HEAD
+    git -C /output/ch log -5
 fi
 
 # May be set for split build or for performance test.
|
65
docker/test/fasttest/Dockerfile
Normal file
65
docker/test/fasttest/Dockerfile
Normal file
@@ -0,0 +1,65 @@
+# docker build -t yandex/clickhouse-fasttest .
+FROM ubuntu:19.10
+
+ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.4.20200302/clickhouse-odbc-1.1.4-Linux.tar.gz"
+ENV COMMIT_SHA=''
+ENV PULL_REQUEST_NUMBER=''
+
+RUN apt-get --allow-unauthenticated update -y && apt-get install --yes wget gnupg
+RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
+RUN echo "deb [trusted=yes] http://apt.llvm.org/eoan/ llvm-toolchain-eoan-10 main" >> /etc/apt/sources.list
+
+
+RUN apt-get --allow-unauthenticated update -y \
+    && env DEBIAN_FRONTEND=noninteractive \
+        apt-get --allow-unauthenticated install --yes --no-install-recommends \
+            bash \
+            fakeroot \
+            ccache \
+            software-properties-common \
+            apt-transport-https \
+            ca-certificates \
+            wget \
+            bash \
+            fakeroot \
+            cmake \
+            ccache \
+            llvm-10 \
+            clang-10 \
+            lld-10 \
+            clang-tidy-10 \
+            ninja-build \
+            gperf \
+            git \
+            tzdata \
+            gperf \
+            rename \
+            build-essential \
+            expect \
+            python \
+            python-lxml \
+            python-termcolor \
+            python-requests \
+            unixodbc \
+            qemu-user-static \
+            sudo \
+            moreutils \
+            curl \
+            brotli
+
+RUN mkdir -p /tmp/clickhouse-odbc-tmp \
+    && wget --quiet -O - ${odbc_driver_url} | tar --strip-components=1 -xz -C /tmp/clickhouse-odbc-tmp \
+    && cp /tmp/clickhouse-odbc-tmp/lib64/*.so /usr/local/lib/ \
+    && odbcinst -i -d -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbcinst.ini.sample \
+    && odbcinst -i -s -l -f /tmp/clickhouse-odbc-tmp/share/doc/clickhouse-odbc/config/odbc.ini.sample \
+    && rm -rf /tmp/clickhouse-odbc-tmp
+
+# This symlink required by gcc to find lld compiler
+RUN ln -s /usr/bin/lld-10 /usr/bin/ld.lld
+
+ENV TZ=Europe/Moscow
+RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+
+
+COPY run.sh /
+CMD ["/bin/bash", "/run.sh"]
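A hedged sketch of building and running this image outside CI: the image tag comes from the comment on the first line of the Dockerfile, and /test_output and /ccache are the paths the accompanying run.sh (next file) writes to and reads from; the host-side paths and the empty COMMIT_SHA are assumptions.

```bash
# Sketch only: host paths are placeholders.
docker build -t yandex/clickhouse-fasttest docker/test/fasttest
docker run \
    -e PULL_REQUEST_NUMBER=0 \
    -e COMMIT_SHA='' \
    -v "$HOME/fasttest/output:/test_output" \
    -v "$HOME/fasttest/ccache:/ccache" \
    yandex/clickhouse-fasttest
```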
docker/test/fasttest/run.sh (new executable file, 97 lines)
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+set -x -e
+
+ls -la
+
+git clone https://github.com/ClickHouse/ClickHouse.git | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/clone_log.txt
+cd ClickHouse
+CLICKHOUSE_DIR=`pwd`
+
+
+if [ "$PULL_REQUEST_NUMBER" != "0" ]; then
+    if git fetch origin "+refs/pull/$PULL_REQUEST_NUMBER/merge"; then
+        git checkout FETCH_HEAD
+        echo 'Cloned merge head'
+    else
+        git fetch
+        git checkout $COMMIT_SHA
+        echo 'Checked out to commit'
+    fi
+else
+    if [ "$COMMIT_SHA" != "" ]; then
+        git checkout $COMMIT_SHA
+    fi
+fi
+
+SUBMODULES_TO_UPDATE="contrib/boost contrib/zlib-ng contrib/libxml2 contrib/poco contrib/libunwind contrib/ryu contrib/fmtlib contrib/base64 contrib/cctz contrib/libcpuid contrib/double-conversion contrib/libcxx contrib/libcxxabi contrib/libc-headers contrib/lz4 contrib/zstd contrib/fastops contrib/rapidjson contrib/re2 contrib/sparsehash-c11"
+
+git submodule update --init --recursive $SUBMODULES_TO_UPDATE | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/submodule_log.txt
+
+export CMAKE_LIBS_CONFIG="-DENABLE_LIBRARIES=0 -DENABLE_TESTS=0 -DENABLE_UTILS=0 -DENABLE_EMBEDDED_COMPILER=0 -DENABLE_THINLTO=0 -DUSE_UNWIND=1"
+
+export CCACHE_DIR=/ccache
+export CCACHE_BASEDIR=/ClickHouse
+export CCACHE_NOHASHDIR=true
+export CCACHE_COMPILERCHECK=content
+export CCACHE_MAXSIZE=15G
+
+ccache --show-stats ||:
+ccache --zero-stats ||:
+
+mkdir build
+cd build
+CLICKHOUSE_BUILD_DIR=`pwd`
+cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_CXX_COMPILER=clang++-10 -DCMAKE_C_COMPILER=clang-10 $CMAKE_LIBS_CONFIG | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/cmake_log.txt
+ninja clickhouse-bundle | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/build_log.txt
+ninja install | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/install_log.txt
+
+
+ccache --show-stats ||:
+
+mkdir -p /etc/clickhouse-server
+mkdir -p /etc/clickhouse-client
+mkdir -p /etc/clickhouse-server/config.d
+mkdir -p /etc/clickhouse-server/users.d
+mkdir -p /var/log/clickhouse-server
+cp $CLICKHOUSE_DIR/programs/server/config.xml /etc/clickhouse-server/
+cp $CLICKHOUSE_DIR/programs/server/users.xml /etc/clickhouse-server/
+
+mkdir -p /etc/clickhouse-server/dict_examples
+ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/dict_examples/
+ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/
+ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/
+ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
+#ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
+ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
+
+clickhouse-server --config /etc/clickhouse-server/config.xml --daemon
+
+until clickhouse-client --query "SELECT 1"
+do
+    sleep 0.1
+done
+
+TESTS_TO_SKIP="parquet avro h3 odbc mysql sha256 _orc_ arrow 01098_temporary_and_external_tables 01083_expressions_in_engine_arguments hdfs 00911_tautological_compare protobuf capnproto java_hash hashing secure 00490_special_line_separators_and_characters_outside_of_bmp 00436_convert_charset 00105_shard_collations 01354_order_by_tuple_collate_const 01292_create_user 01098_msgpack_format 00929_multi_match_edit_distance 00926_multimatch 00834_cancel_http_readonly_queries_on_client_close brotli parallel_alter 00302_http_compression 00417_kill_query 01294_lazy_database_concurrent 01193_metadata_loading base64 01031_mutations_interpreter_and_context json client 01305_replica_create_drop_zookeeper 01092_memory_profiler 01355_ilike 01281_unsucceeded_insert_select_queries_counter live_view limit_memory memory_limit memory_leak 00110_external_sort 00682_empty_parts_merge 00701_rollup 00109_shard_totals_after_having"
+
+clickhouse-test -j 4 --no-long --testname --shard --zookeeper --skip $TESTS_TO_SKIP 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee /test_output/test_log.txt
+
+mv /var/log/clickhouse-server/* /test_output
@@ -11,6 +11,7 @@ services:
     environment:
       MINIO_ACCESS_KEY: minio
       MINIO_SECRET_KEY: minio123
+      MINIO_PROMETHEUS_AUTH_TYPE: public
     command: server --address :9001 --certs-dir /certs /data1-1
     depends_on:
       - proxy1
@@ -498,7 +498,8 @@ create table queries engine File(TSVWithNamesAndTypes, 'report/queries.tsv')
 
         left, right, diff, stat_threshold,
         if(report_threshold > 0, report_threshold, 0.10) as report_threshold,
-        test, query_index, query_display_name
+        query_metric_stats.test test, query_metric_stats.query_index query_index,
+        query_display_name
     from query_metric_stats
     left join file('analyze/report-thresholds.tsv', TSV,
         'test text, report_threshold float') thresholds
@@ -666,7 +667,8 @@ create view query_display_names as select * from
 
 create table unstable_query_runs engine File(TSVWithNamesAndTypes,
         'unstable-query-runs.$version.rep') as
-    select test, query_index, query_display_name, query_id
+    select query_runs.test test, query_runs.query_index query_index,
+        query_display_name, query_id
     from query_runs
     join queries_for_flamegraph on
         query_runs.test = queries_for_flamegraph.test
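The two SQL hunks above qualify test and query_index with their source table because both sides of the join expose columns with those names. A hedged, simplified illustration of the same idea; the tables a and b below are hypothetical stand-ins, not the views from the script.

```bash
# Sketch: qualifying columns that exist on both sides of a join avoids
# ambiguous-identifier errors; a and b are hypothetical tables.
clickhouse-client --query "
    SELECT a.test AS test, a.query_index AS query_index, b.report_threshold
    FROM a
    LEFT JOIN b ON a.test = b.test"
```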
@@ -55,18 +55,21 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
 ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
 ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
 
-ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
-ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
-ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/;
+ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
+
+# Retain any pre-existing config and allow ClickHouse to load those if required
+ln -s --backup=simple --suffix=_original.xml \
+   /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
 
 
 service zookeeper start
@@ -17,7 +17,6 @@ ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config
 ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
-ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
 ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
 ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
 ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
@@ -33,6 +32,10 @@ ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
 ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
 ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
 
+# Retain any pre-existing config and allow ClickHouse to load it if required
+ln -s --backup=simple --suffix=_original.xml \
+   /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
+
 if [[ -n "$USE_POLYMORPHIC_PARTS" ]] && [[ "$USE_POLYMORPHIC_PARTS" -eq 1 ]]; then
     ln -s /usr/share/clickhouse-test/config/polymorphic_parts.xml /etc/clickhouse-server/config.d/
 fi
@@ -46,27 +46,30 @@ ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-serv
 ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/dict_examples/; \
 ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/dict_examples/;
 
-ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
-ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/; \
-ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/; \
-ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/; \
-ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/; \
-ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/; \
-ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
+ln -s /usr/share/clickhouse-test/config/zookeeper.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/listen.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/text_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/metric_log.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/readonly.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/access_management.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/ints_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/strings_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/decimals_dictionary.xml /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/macros.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/disks.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/secure_ports.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/clusters.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/graphite.xml /etc/clickhouse-server/config.d/
+ln -s /usr/share/clickhouse-test/config/server.key /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/server.crt /etc/clickhouse-server/
+ln -s /usr/share/clickhouse-test/config/dhparam.pem /etc/clickhouse-server/
+ln -sf /usr/share/clickhouse-test/config/client_config.xml /etc/clickhouse-client/config.xml
+
+# Retain any pre-existing config and allow ClickHouse to load it if required
+ln -s --backup=simple --suffix=_original.xml \
+   /usr/share/clickhouse-test/config/query_masking_rules.xml /etc/clickhouse-server/config.d/
 
 service zookeeper start
 sleep 5
@@ -23,28 +23,7 @@ RUN apt-get update -y \
             brotli
 
 COPY ./stress /stress
+COPY run.sh /
 
 ENV DATASETS="hits visits"
+CMD ["/bin/bash", "/run.sh"]
-CMD dpkg -i package_folder/clickhouse-common-static_*.deb; \
-dpkg -i package_folder/clickhouse-common-static-dbg_*.deb; \
-dpkg -i package_folder/clickhouse-server_*.deb; \
-dpkg -i package_folder/clickhouse-client_*.deb; \
-dpkg -i package_folder/clickhouse-test_*.deb; \
-ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/; \
-ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/; \
-echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment; \
-echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment; \
-echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment; \
-service clickhouse-server start && sleep 5 \
-&& /s3downloader --dataset-names $DATASETS \
-&& chmod 777 -R /var/lib/clickhouse \
-&& clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary" \
-&& clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test" \
-&& service clickhouse-server restart && sleep 5 \
-&& clickhouse-client --query "SHOW TABLES FROM datasets" \
-&& clickhouse-client --query "SHOW TABLES FROM test" \
-&& clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits" \
-&& clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits" \
-&& clickhouse-client --query "SHOW TABLES FROM test" \
-&& ./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"
|
56
docker/test/stress/run.sh
Executable file
56
docker/test/stress/run.sh
Executable file
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+set -x
+
+dpkg -i package_folder/clickhouse-common-static_*.deb
+dpkg -i package_folder/clickhouse-common-static-dbg_*.deb
+dpkg -i package_folder/clickhouse-server_*.deb
+dpkg -i package_folder/clickhouse-client_*.deb
+dpkg -i package_folder/clickhouse-test_*.deb
+
+function wait_server()
+{
+    counter=0
+    until clickhouse-client --query "SELECT 1"
+    do
+        if [ "$counter" -gt 120 ]
+        then
+            break
+        fi
+        sleep 0.5
+        counter=$(($counter + 1))
+    done
+}
+
+ln -s /usr/share/clickhouse-test/config/log_queries.xml /etc/clickhouse-server/users.d/
+ln -s /usr/share/clickhouse-test/config/part_log.xml /etc/clickhouse-server/config.d/
+
+echo "TSAN_OPTIONS='halt_on_error=1 history_size=7 ignore_noninstrumented_modules=1 verbosity=1'" >> /etc/environment
+echo "UBSAN_OPTIONS='print_stacktrace=1'" >> /etc/environment
+echo "ASAN_OPTIONS='malloc_context_size=10 verbosity=1 allocator_release_to_os_interval_ms=10000'" >> /etc/environment
+
+service clickhouse-server start
+
+wait_server
+
+/s3downloader --dataset-names $DATASETS
+chmod 777 -R /var/lib/clickhouse
+clickhouse-client --query "ATTACH DATABASE IF NOT EXISTS datasets ENGINE = Ordinary"
+clickhouse-client --query "CREATE DATABASE IF NOT EXISTS test"
+service clickhouse-server restart
+
+wait_server
+
+clickhouse-client --query "SHOW TABLES FROM datasets"
+clickhouse-client --query "SHOW TABLES FROM test"
+clickhouse-client --query "RENAME TABLE datasets.hits_v1 TO test.hits"
+clickhouse-client --query "RENAME TABLE datasets.visits_v1 TO test.visits"
+clickhouse-client --query "SHOW TABLES FROM test"
+
+./stress --output-folder test_output --skip-func-tests "$SKIP_TESTS_OPTION"
+
+service clickhouse-server restart
+
+wait_server
+
+clickhouse-client --query "SELECT 'Server successfully started'" > /test_output/alive_check.txt || echo 'Server failed to start' > /test_output/alive_check.txt
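A hedged sketch of how this script is driven: the .deb packages are expected under package_folder and the dataset list in DATASETS (both names appear in run.sh and in the Dockerfile hunk above); the image tag and host paths below are assumptions.

```bash
# Sketch only: <clickhouse-stress-image> and the host paths are placeholders.
docker run \
    -e DATASETS="hits visits" \
    -v "$PWD/packages:/package_folder" \
    -v "$PWD/stress-output:/test_output" \
    <clickhouse-stress-image>
```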
@@ -41,15 +41,6 @@ def run_func_test(cmd, output_prefix, num_processes, skip_tests_option):
     return pipes
 
 
-def check_clickhouse_alive(cmd):
-    try:
-        logging.info("Checking ClickHouse still alive")
-        check_call("{} --query \"select 'Still alive'\"".format(cmd), shell=True)
-        return True
-    except:
-        return False
-
-
 if __name__ == "__main__":
     logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
     parser = argparse.ArgumentParser(description="ClickHouse script for running stresstest")
@@ -65,29 +56,18 @@ if __name__ == "__main__":
     args = parser.parse_args()
     func_pipes = []
     perf_process = None
-    try:
-        perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder)
-        func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests)
+    perf_process = run_perf_test(args.perf_test_cmd, args.perf_test_xml_path, args.output_folder)
+    func_pipes = run_func_test(args.test_cmd, args.output_folder, args.num_parallel, args.skip_func_tests)
 
     logging.info("Will wait functests to finish")
     while True:
         retcodes = []
         for p in func_pipes:
             if p.poll() is not None:
                 retcodes.append(p.returncode)
         if len(retcodes) == len(func_pipes):
             break
         logging.info("Finished %s from %s processes", len(retcodes), len(func_pipes))
         time.sleep(5)
 
-        if not check_clickhouse_alive(args.client_cmd):
-            raise Exception("Stress failed, results in logs")
-        else:
-            logging.info("Stress is ok")
-    except Exception as ex:
-        raise ex
-    finally:
-        if os.path.exists(args.server_log_folder):
-            logging.info("Copying server log files")
-            for log_file in os.listdir(args.server_log_folder):
-                shutil.copy(os.path.join(args.server_log_folder, log_file), os.path.join(args.output_folder, log_file))
+    logging.info("Stress test finished")
@@ -35,7 +35,7 @@ RUN apt-get update \
 ENV TZ=Europe/Moscow
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
 
-RUN pip3 install urllib3 testflows==1.6.24 docker-compose docker dicttoxml kazoo tzlocal
+RUN pip3 install urllib3 testflows==1.6.39 docker-compose docker dicttoxml kazoo tzlocal
 
 ENV DOCKER_CHANNEL stable
 ENV DOCKER_VERSION 17.09.1-ce
@@ -24,7 +24,7 @@ See the detailed description of the [CREATE TABLE](../../../sql-reference/statem
 
 **Engine Parameters**
 
-- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-strictness).
+- `join_strictness` – [JOIN strictness](../../../sql-reference/statements/select/join.md#select-join-types).
 - `join_type` – [JOIN type](../../../sql-reference/statements/select/join.md#select-join-types).
 - `k1[, k2, ...]` – Key columns from the `USING` clause that the `JOIN` operation is made with.
 
@@ -44,6 +44,7 @@ toc_title: Adopters
 | <a href="https://www.lbl.gov" class="favicon">Lawrence Berkeley National Laboratory</a> | Research | Traffic analysis | 1 server | 11.8 TiB | [Slides in English, April 2019](https://www.smitasin.com/presentations/2019-04-17_DOE-NSM.pdf) |
 | <a href="https://lifestreet.com/" class="favicon">LifeStreet</a> | Ad network | Main product | 75 servers (3 replicas) | 5.27 PiB | [Blog post in Russian, February 2017](https://habr.com/en/post/322620/) |
 | <a href="https://mcs.mail.ru/" class="favicon">Mail.ru Cloud Solutions</a> | Cloud services | Main product | — | — | [Article in Russian](https://mcs.mail.ru/help/db-create/clickhouse#) |
+| <a href="https://tech.mymarilyn.ru" class="favicon">Marilyn</a> | Advertising | Statistics | — | — | [Talk in Russian, June 2017](https://www.youtube.com/watch?v=iXlIgx2khwc) |
 | <a href="https://www.messagebird.com" class="favicon">MessageBird</a> | Telecommunications | Statistics | — | — | [Slides in English, November 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup20/messagebird.pdf) |
 | <a href="https://www.mgid.com/" class="favicon">MGID</a> | Ad network | Web-analytics | — | — | [Blog post in Russian, April 2020](http://gs-studio.com/news-about-it/32777----clickhouse---c) |
 | <a href="https://www.oneapm.com/" class="favicon">OneAPM</a> | Monitorings and Data Analysis | Main product | — | — | [Slides in Chinese, October 2018](https://github.com/ClickHouse/clickhouse-presentations/blob/master/meetup19/8.%20clickhouse在OneAPM的应用%20杜龙.pdf) |
@@ -471,7 +471,7 @@ Default value: 0.
 
 See also:
 
-- [JOIN strictness](../../sql-reference/statements/select/join.md#select-join-strictness)
+- [JOIN strictness](../../sql-reference/statements/select/join.md#join-settings)
 
 ## temporary\_files\_codec {#temporary_files_codec}
 
@@ -11,7 +11,7 @@ Syntax:
 ``` sql
 SELECT <expr_list>
 FROM <left_table>
-[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
+[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
 (ON <expr_list>)|(USING <column_list>) ...
 ```
 
@@ -33,17 +33,13 @@ Additional join types available in ClickHouse:
 
 - `LEFT SEMI JOIN` and `RIGHT SEMI JOIN`, a whitelist on “join keys”, without producing a cartesian product.
 - `LEFT ANTI JOIN` and `RIGHT ANTI JOIN`, a blacklist on “join keys”, without producing a cartesian product.
+- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
+- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
 
-## Strictness {#select-join-strictness}
+## Setting {#join-settings}
 
-Modifies how matching by “join keys” is performed
-
-- `ALL` — The standard `JOIN` behavior in SQL as described above. The default.
-- `ANY` — Partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
-- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
-
 !!! note "Note"
-    The default strictness value can be overriden using [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
+    The default join type can be overriden using [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) setting.
 
 Also the behavior of ClickHouse server for `ANY JOIN` operations depends on the [any_join_distinct_right_table_keys](../../../operations/settings/settings.md#any_join_distinct_right_table_keys) setting.
 
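Under the revised grammar above, ANY and ASOF are written as part of the join type rather than as a separate strictness keyword. A hedged example of the new spelling; the tables t1, t2 and columns k, v are hypothetical.

```bash
# Sketch: LEFT ANY JOIN keeps at most one right-hand match per left row,
# per the bullet added above. Table and column names are hypothetical.
clickhouse-client --query "
    SELECT t1.k, t2.v
    FROM t1
    LEFT ANY JOIN t2 USING (k)"
```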
@@ -23,7 +23,7 @@ Vea la descripción detallada del [CREATE TABLE](../../../sql-reference/statemen
 
 **Parámetros del motor**
 
-- `join_strictness` – [ÚNETE a la rigurosidad](../../../sql-reference/statements/select/join.md#select-join-strictness).
+- `join_strictness` – [ÚNETE a la rigurosidad](../../../sql-reference/statements/select/join.md#select-join-types).
 - `join_type` – [Tipo de unión](../../../sql-reference/statements/select/join.md#select-join-types).
 - `k1[, k2, ...]` – Key columns from the `USING` cláusula que el `JOIN` operación se hace con.
 
@@ -12,7 +12,7 @@ Sintaxis:
 ``` sql
 SELECT <expr_list>
 FROM <left_table>
-[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
+[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
 (ON <expr_list>)|(USING <column_list>) ...
 ```
 
@@ -34,14 +34,10 @@ Tipos de unión adicionales disponibles en ClickHouse:
 
 - `LEFT SEMI JOIN` y `RIGHT SEMI JOIN`, una lista blanca en “join keys”, sin producir un producto cartesiano.
 - `LEFT ANTI JOIN` y `RIGHT ANTI JOIN`, una lista negra sobre “join keys”, sin producir un producto cartesiano.
+- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
+- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
 
-## Rigor {#select-join-strictness}
+## Setting {#join-settings}
 
-Modifica cómo coincidir por “join keys” se realiza
-
-- `ALL` — The standard `JOIN` comportamiento en SQL como se describió anteriormente. Predeterminado.
-- `ANY` — Partially (for opposite side of `LEFT` y `RIGHT`) o completamente (para `INNER` y `FULL`) deshabilita el producto cartesiano para `JOIN` tipo.
-- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` el uso se describe a continuación.
-
 !!! note "Nota"
     El valor de rigor predeterminado se puede anular usando [Por favor, introduzca su dirección de correo electrónico](../../../operations/settings/settings.md#settings-join_default_strictness) configuración.
@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**پارامترهای موتور**

- `join_strictness` – [پیوستن به سختی](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [پیوستن به سختی](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [پیوستن به نوع](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` بند که `JOIN` عملیات با ساخته شده.

@ -12,7 +12,7 @@ machine_translated_rev: 72537a2d527c63c07aa5d2361a8829f3895cf2bd
|
|||||||
``` sql
|
``` sql
|
||||||
SELECT <expr_list>
|
SELECT <expr_list>
|
||||||
FROM <left_table>
|
FROM <left_table>
|
||||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
|
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
|
||||||
(ON <expr_list>)|(USING <column_list>) ...
|
(ON <expr_list>)|(USING <column_list>) ...
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -34,15 +34,12 @@ FROM <left_table>
|
|||||||
|
|
||||||
- `LEFT SEMI JOIN` و `RIGHT SEMI JOIN`, یک لیست سفید در “join keys”, بدون تولید محصول دکارتی.
|
- `LEFT SEMI JOIN` و `RIGHT SEMI JOIN`, یک لیست سفید در “join keys”, بدون تولید محصول دکارتی.
|
||||||
- `LEFT ANTI JOIN` و `RIGHT ANTI JOIN`, لیست سیاه در “join keys”, بدون تولید محصول دکارتی.
|
- `LEFT ANTI JOIN` و `RIGHT ANTI JOIN`, لیست سیاه در “join keys”, بدون تولید محصول دکارتی.
|
||||||
|
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` و `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
|
||||||
|
- `ASOF JOIN` و `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
|
||||||
|
|
||||||
## سختی {#select-join-strictness}
|
## Setting {#join-settings}
|
||||||
|
|
||||||
تغییر چگونگی تطبیق توسط “join keys” انجام شده است
|
تغییر چگونگی تطبیق توسط “join keys” انجام شده است
|
||||||
|
|
||||||
- `ALL` — The standard `JOIN` رفتار در گذاشتن همانطور که در بالا توضیح. به طور پیش فرض.
|
|
||||||
- `ANY` — Partially (for opposite side of `LEFT` و `RIGHT`) یا به طور کامل (برای `INNER` و `FULL`) غیر فعال محصول دکارتی برای استاندارد `JOIN` انواع.
|
|
||||||
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` استفاده در زیر توضیح داده شده است.
|
|
||||||
|
|
||||||
!!! note "یادداشت"
|
!!! note "یادداشت"
|
||||||
مقدار سختگیرانه پیش فرض را می توان با استفاده از لغو [بررسی اجمالی](../../../operations/settings/settings.md#settings-join_default_strictness) تنظیمات.
|
مقدار سختگیرانه پیش فرض را می توان با استفاده از لغو [بررسی اجمالی](../../../operations/settings/settings.md#settings-join_default_strictness) تنظیمات.
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@ Voir la description détaillée de la [CREATE TABLE](../../../sql-reference/stat

**Les Paramètres Du Moteur**

- `join_strictness` – [ADHÉRER à la rigueur](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [ADHÉRER à la rigueur](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [Type de jointure](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` la clause que l' `JOIN` l'opération est faite avec de la.

@ -12,7 +12,7 @@ Syntaxe:
|
|||||||
``` sql
|
``` sql
|
||||||
SELECT <expr_list>
|
SELECT <expr_list>
|
||||||
FROM <left_table>
|
FROM <left_table>
|
||||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
|
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
|
||||||
(ON <expr_list>)|(USING <column_list>) ...
|
(ON <expr_list>)|(USING <column_list>) ...
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -34,14 +34,10 @@ Autres types de jointure disponibles dans ClickHouse:
|
|||||||
|
|
||||||
- `LEFT SEMI JOIN` et `RIGHT SEMI JOIN` une liste blanche sur “join keys”, sans produire un produit cartésien.
|
- `LEFT SEMI JOIN` et `RIGHT SEMI JOIN` une liste blanche sur “join keys”, sans produire un produit cartésien.
|
||||||
- `LEFT ANTI JOIN` et `RIGHT ANTI JOIN` une liste noire sur “join keys”, sans produire un produit cartésien.
|
- `LEFT ANTI JOIN` et `RIGHT ANTI JOIN` une liste noire sur “join keys”, sans produire un produit cartésien.
|
||||||
|
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` et `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
|
||||||
|
- `ASOF JOIN` et `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
|
||||||
|
|
||||||
## Rigueur {#select-join-strictness}
|
## Setting {#join-settings}
|
||||||
|
|
||||||
Modifie la façon dont la correspondance par “join keys” est effectué
|
|
||||||
|
|
||||||
- `ALL` — The standard `JOIN` comportement en SQL comme décrit ci-dessus. Défaut.
|
|
||||||
- `ANY` — Partially (for opposite side of `LEFT` et `RIGHT`) ou complètement (pour `INNER` et `FULL`) désactive le produit cartésien de la norme `JOIN` type.
|
|
||||||
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` l'utilisation est décrite ci-dessous.
|
|
||||||
|
|
||||||
!!! note "Note"
|
!!! note "Note"
|
||||||
La valeur de rigueur par défaut peut être remplacée à l'aide [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) paramètre.
|
La valeur de rigueur par défaut peut être remplacée à l'aide [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) paramètre.
|
||||||
|
@ -23,7 +23,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**エンジン変数**

- `join_strictness` – [厳密に結合する](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [厳密に結合する](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [結合タイプ](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` 句は、 `JOIN` 操作はでなされる。

@ -16,7 +16,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**Параметры движка**

- `join_strictness` – [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-strictness).
- `join_strictness` – [строгость JOIN](../../../engines/table-engines/special/join.md#select-join-types).
- `join_type` – [тип JOIN](../../../engines/table-engines/special/join.md#select-join-types).
- `k1[, k2, ...]` – ключевые столбцы секции `USING` с которыми выполняется операция `JOIN`.

@ -7,7 +7,7 @@ Join создаёт новую таблицу путем объединения
|
|||||||
``` sql
|
``` sql
|
||||||
SELECT <expr_list>
|
SELECT <expr_list>
|
||||||
FROM <left_table>
|
FROM <left_table>
|
||||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
|
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
|
||||||
(ON <expr_list>)|(USING <column_list>) ...
|
(ON <expr_list>)|(USING <column_list>) ...
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -29,18 +29,14 @@ FROM <left_table>
|
|||||||
|
|
||||||
- `LEFT SEMI JOIN` и `RIGHT SEMI JOIN`, белый список по ключам соединения, не производит декартово произведение.
|
- `LEFT SEMI JOIN` и `RIGHT SEMI JOIN`, белый список по ключам соединения, не производит декартово произведение.
|
||||||
- `LEFT ANTI JOIN` и `RIGHT ANTI JOIN`, черный список по ключам соединения, не производит декартово произведение.
|
- `LEFT ANTI JOIN` и `RIGHT ANTI JOIN`, черный список по ключам соединения, не производит декартово произведение.
|
||||||
|
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` и `INNER ANY JOIN`, Частично (для противоположных сторон `LEFT` и `RIGHT`) или полностью (для `INNER` и `FULL`) отключает декартово произведение для стандартых видов `JOIN`.
|
||||||
|
- `ASOF JOIN` и `LEFT ASOF JOIN`, Для соединения последовательностей по нечеткому совпадению. Использование `ASOF JOIN` описано ниже.
|
||||||
|
|
||||||
## Строгость {#select-join-strictness}
|
## Настройки {#join-settings}
|
||||||
|
|
||||||
Изменяет способ сопоставления по ключам соединения:
|
|
||||||
|
|
||||||
- `ALL` — стандартное поведение `JOIN` в SQL, как описано выше. По умолчанию.
|
|
||||||
- `ANY` — Частично (для противоположных сторон `LEFT` и `RIGHT`) или полностью (для `INNER` и `FULL`) отключает декартово произведение для стандартых видов `JOIN`.
|
|
||||||
- `ASOF` — Для соединения последовательностей по нечеткому совпадению. Использование `ASOF JOIN` описано ниже.
|
|
||||||
|
|
||||||
!!! note "Примечание"
|
!!! note "Примечание"
|
||||||
Значение строгости по умолчанию может быть переопределено с помощью настройки [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
Значение строгости по умолчанию может быть переопределено с помощью настройки [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness).
|
||||||
|
|
||||||
### Использование ASOF JOIN {#asof-join-usage}
|
### Использование ASOF JOIN {#asof-join-usage}
|
||||||
|
|
||||||
`ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения.
|
`ASOF JOIN` применим в том случае, когда необходимо объединять записи, которые не имеют точного совпадения.
|
||||||
|
@ -4,8 +4,11 @@
# This script deploys ClickHouse website to your personal test subdomain.
#
# Before first use of this script:
# 1) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login)
# 1) Set up building documentation according to https://github.com/ClickHouse/ClickHouse/tree/master/docs/tools#use-buildpy-use-build-py
# 2) Send email on address from https://clickhouse.tech/#contacts asking to create GIT_USER-test.clickhouse.tech domain
# 2) Create https://github.com/GIT_USER/clickhouse.github.io repo (replace GIT_USER with your GitHub login)
# 3) Enable GitHub Pages in settings of this repo
# 4) Add file named CNAME in root of this repo with "GIT_USER-test.clickhouse.tech" content (without quotes)
# 5) Send email on address from https://clickhouse.tech/#contacts asking to create GIT_USER-test.clickhouse.tech domain
#
set -ex

@ -23,7 +23,7 @@ Ayrıntılı açıklamasına bakın [CREATE TABLE](../../../sql-reference/statem

**Motor Parametreleri**

- `join_strictness` – [Katılık katılın](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [Katılık katılın](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [Birleştirme türü](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – Key columns from the `USING` fık thera: `JOIN` işlemi yapılmamaktadır.

@ -24,7 +24,7 @@ CREATE TABLE [IF NOT EXISTS] [db.]table_name [ON CLUSTER cluster]

**引擎参数**

- `join_strictness` – [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-strictness).
- `join_strictness` – [JOIN 限制](../../../sql-reference/statements/select/join.md#select-join-types).
- `join_type` – [JOIN 类型](../../../sql-reference/statements/select/join.md#select-join-types).
- `k1[, k2, ...]` – 进行`JOIN` 操作时 `USING`语句用到的key列

@ -13,7 +13,7 @@ Join通过使用一个或多个表的公共值合并来自一个或多个表的
|
|||||||
``` sql
|
``` sql
|
||||||
SELECT <expr_list>
|
SELECT <expr_list>
|
||||||
FROM <left_table>
|
FROM <left_table>
|
||||||
[GLOBAL] [ANY|ALL|ASOF] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI] JOIN <right_table>
|
[GLOBAL] [INNER|LEFT|RIGHT|FULL|CROSS] [OUTER|SEMI|ANTI|ANY|ASOF] JOIN <right_table>
|
||||||
(ON <expr_list>)|(USING <column_list>) ...
|
(ON <expr_list>)|(USING <column_list>) ...
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -35,14 +35,10 @@ ClickHouse中提供的其他联接类型:
|
|||||||
|
|
||||||
- `LEFT SEMI JOIN` 和 `RIGHT SEMI JOIN`,白名单 “join keys”,而不产生笛卡尔积。
|
- `LEFT SEMI JOIN` 和 `RIGHT SEMI JOIN`,白名单 “join keys”,而不产生笛卡尔积。
|
||||||
- `LEFT ANTI JOIN` 和 `RIGHT ANTI JOIN`,黑名单 “join keys”,而不产生笛卡尔积。
|
- `LEFT ANTI JOIN` 和 `RIGHT ANTI JOIN`,黑名单 “join keys”,而不产生笛卡尔积。
|
||||||
|
- `LEFT ANY JOIN`, `RIGHT ANY JOIN` and `INNER ANY JOIN`, partially (for opposite side of `LEFT` and `RIGHT`) or completely (for `INNER` and `FULL`) disables the cartesian product for standard `JOIN` types.
|
||||||
|
- `ASOF JOIN` and `LEFT ASOF JOIN`, joining sequences with a non-exact match. `ASOF JOIN` usage is described below.
|
||||||
|
|
||||||
## 严格 {#select-join-strictness}
|
## 严格 {#join-settings}
|
||||||
|
|
||||||
修改如何匹配 “join keys” 执行
|
|
||||||
|
|
||||||
- `ALL` — The standard `JOIN` sql中的行为如上所述。 默认值。
|
|
||||||
- `ANY` — Partially (for opposite side of `LEFT` 和 `RIGHT`)或完全(为 `INNER` 和 `FULL`)禁用笛卡尔积为标准 `JOIN` 类型。
|
|
||||||
- `ASOF` — For joining sequences with a non-exact match. `ASOF JOIN` 用法描述如下。
|
|
||||||
|
|
||||||
!!! note "注"
|
!!! note "注"
|
||||||
可以使用以下方式复盖默认的严格性值 [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) 设置。
|
可以使用以下方式复盖默认的严格性值 [join\_default\_strictness](../../../operations/settings/settings.md#settings-join_default_strictness) 设置。
|
||||||
|
@ -215,6 +215,9 @@ try

    /// Skip networking

    /// Sets external authenticators config (LDAP).
    context->setExternalAuthenticatorsConfig(config());

    setupUsers();

    /// Limit on total number of concurrently executing queries.
@ -295,7 +295,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
#endif

    /** Context contains all that query execution is dependent:
      * settings, available functions, data types, aggregate functions, databases...
      * settings, available functions, data types, aggregate functions, databases, ...
      */
    auto shared_context = Context::createShared();
    auto global_context = std::make_unique<Context>(Context::createGlobal(shared_context.get()));

@ -543,6 +543,7 @@ int Server::main(const std::vector<std::string> & /*args*/)
    //buildLoggers(*config, logger());
    global_context->setClustersConfig(config);
    global_context->setMacros(std::make_unique<Macros>(*config, "macros"));
    global_context->setExternalAuthenticatorsConfig(*config);

    /// Setup protection to avoid accidental DROP for big tables (that are greater than 50 GB by default)
    if (config->has("max_table_size_to_drop"))
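The `setExternalAuthenticatorsConfig()` calls added above (in `LocalServer.cpp` and `Server.cpp`) hand the parsed server configuration to the access-control layer, whose new entry points appear later in this diff. Below is a minimal sketch of that wiring, not part of the patch: it assumes the patched ClickHouse headers are available, uses a placeholder config path, and calls `AccessControlManager` directly, whereas the server goes through `Context`, which presumably forwards to the manager.

```cpp
#include <Access/AccessControlManager.h>
#include <Poco/Util/XMLConfiguration.h>
#include <Poco/AutoPtr.h>
#include <string>

// Sketch only: load a server config file and register its <ldap_servers> section.
void wireExternalAuthenticators(DB::AccessControlManager & access_control, const std::string & config_path)
{
    Poco::AutoPtr<Poco::Util::XMLConfiguration> config(new Poco::Util::XMLConfiguration(config_path));

    /// Called once at startup and again on every config reload.
    access_control.setExternalAuthenticatorsConfig(*config);

    /// Later consulted by ContextAccess::isCorrectPassword() for users with 'ldap' authentication.
    const auto & external_authenticators = access_control.getExternalAuthenticators();
    (void)external_authenticators;
}
```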
@ -215,6 +215,47 @@
    <!-- Path to folder where users and roles created by SQL commands are stored. -->
    <access_control_path>/var/lib/clickhouse/access/</access_control_path>

    <!-- External user directories (LDAP). -->
    <ldap_servers>
        <!-- List LDAP servers with their connection parameters here to later use them as authenticators for dedicated users,
             who have 'ldap' authentication mechanism specified instead of 'password'.
             Parameters:
                host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
                port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
                auth_dn_prefix, auth_dn_suffix - prefix and suffix used to construct the DN to bind to.
                    Effectively, the resulting DN will be constructed as auth_dn_prefix + escape(user_name) + auth_dn_suffix string.
                    Note, that this implies that auth_dn_suffix should usually have comma ',' as its first non-space character.
                enable_tls - flag to trigger use of secure connection to the LDAP server.
                    Specify 'no' for plain text (ldap://) protocol (not recommended).
                    Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
                    Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
                tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
                    Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
                tls_require_cert - SSL/TLS peer certificate verification behavior.
                    Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
                tls_cert_file - path to certificate file.
                tls_key_file - path to certificate key file.
                tls_ca_cert_file - path to CA certificate file.
                tls_ca_cert_dir - path to the directory containing CA certificates.
                tls_cipher_suite - allowed cipher suite.
             Example:
                <my_ldap_server>
                    <host>localhost</host>
                    <port>636</port>
                    <auth_dn_prefix>uid=</auth_dn_prefix>
                    <auth_dn_suffix>,ou=users,dc=example,dc=com</auth_dn_suffix>
                    <enable_tls>yes</enable_tls>
                    <tls_minimum_protocol_version>tls1.2</tls_minimum_protocol_version>
                    <tls_require_cert>demand</tls_require_cert>
                    <tls_cert_file>/path/to/tls_cert_file</tls_cert_file>
                    <tls_key_file>/path/to/tls_key_file</tls_key_file>
                    <tls_ca_cert_file>/path/to/tls_ca_cert_file</tls_ca_cert_file>
                    <tls_ca_cert_dir>/path/to/tls_ca_cert_dir</tls_ca_cert_dir>
                    <tls_cipher_suite>ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384</tls_cipher_suite>
                </my_ldap_server>
        -->
    </ldap_servers>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>users.xml</users_config>

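For orientation, here is a small standalone sketch (not part of the patch) of reading these keys back with the same Poco configuration API that the new parser in `src/Access/ExternalAuthenticators.cpp`, shown further down in this diff, relies on. The file name and server name are placeholders, the port fallback mirrors the documented defaults, and the 'starttls' value is not treated specially here.

```cpp
#include <Poco/Util/XMLConfiguration.h>
#include <Poco/AutoPtr.h>
#include <iostream>
#include <string>

int main()
{
    Poco::AutoPtr<Poco::Util::XMLConfiguration> config(new Poco::Util::XMLConfiguration("config.xml"));

    const std::string prefix = "ldap_servers.my_ldap_server";
    const std::string host = config->getString(prefix + ".host");                      // mandatory, no default
    const std::string enable_tls = config->getString(prefix + ".enable_tls", "yes");   // "yes" | "no" | "starttls"
    const int port = config->getInt(prefix + ".port", enable_tls == "no" ? 389 : 636); // documented defaults

    std::cout << host << ':' << port << " (enable_tls=" << enable_tls << ")\n";
    return 0;
}
```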
@ -44,6 +44,9 @@
             If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
             Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>

             If you want to specify a previously defined LDAP server (see 'ldap_servers' in main config) for authentication, place its name in 'server' element inside 'ldap' element.
             Example: <ldap><server>my_ldap_server</server></ldap>

             How to generate decent password:
             Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
             In first line will be password and in second - corresponding SHA256.
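The double-SHA1 variant mentioned above can also be produced programmatically. A hypothetical helper (not part of the patch), built on the same `Poco::SHA1Engine` the access code already uses; the sample password is a placeholder:

```cpp
#include <Poco/SHA1Engine.h>
#include <Poco/DigestEngine.h>
#include <iostream>
#include <string>

// Compute the value expected in <password_double_sha1_hex>: SHA1 applied twice, hex-encoded.
static std::string doubleSHA1Hex(const std::string & password)
{
    Poco::SHA1Engine first_pass;
    first_pass.update(password);
    const auto & first_digest = first_pass.digest();   // binary SHA1 of the password

    Poco::SHA1Engine second_pass;
    second_pass.update(first_digest.data(), first_digest.size());
    return Poco::DigestEngine::digestToHex(second_pass.digest());
}

int main()
{
    std::cout << doubleSHA1Hex("default") << '\n';
    return 0;
}
```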
@ -9,6 +9,7 @@
|
|||||||
#include <Access/QuotaCache.h>
|
#include <Access/QuotaCache.h>
|
||||||
#include <Access/QuotaUsage.h>
|
#include <Access/QuotaUsage.h>
|
||||||
#include <Access/SettingsProfilesCache.h>
|
#include <Access/SettingsProfilesCache.h>
|
||||||
|
#include <Access/ExternalAuthenticators.h>
|
||||||
#include <Core/Settings.h>
|
#include <Core/Settings.h>
|
||||||
#include <Poco/ExpireCache.h>
|
#include <Poco/ExpireCache.h>
|
||||||
#include <mutex>
|
#include <mutex>
|
||||||
@ -64,7 +65,8 @@ AccessControlManager::AccessControlManager()
|
|||||||
role_cache(std::make_unique<RoleCache>(*this)),
|
role_cache(std::make_unique<RoleCache>(*this)),
|
||||||
row_policy_cache(std::make_unique<RowPolicyCache>(*this)),
|
row_policy_cache(std::make_unique<RowPolicyCache>(*this)),
|
||||||
quota_cache(std::make_unique<QuotaCache>(*this)),
|
quota_cache(std::make_unique<QuotaCache>(*this)),
|
||||||
settings_profiles_cache(std::make_unique<SettingsProfilesCache>(*this))
|
settings_profiles_cache(std::make_unique<SettingsProfilesCache>(*this)),
|
||||||
|
external_authenticators(std::make_unique<ExternalAuthenticators>())
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -79,6 +81,12 @@ void AccessControlManager::setLocalDirectory(const String & directory_path)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void AccessControlManager::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config)
|
||||||
|
{
|
||||||
|
external_authenticators->setConfig(config, getLogger());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
void AccessControlManager::setUsersConfig(const Poco::Util::AbstractConfiguration & users_config)
|
void AccessControlManager::setUsersConfig(const Poco::Util::AbstractConfiguration & users_config)
|
||||||
{
|
{
|
||||||
auto & users_config_access_storage = dynamic_cast<UsersConfigAccessStorage &>(getStorageByIndex(USERS_CONFIG_ACCESS_STORAGE_INDEX));
|
auto & users_config_access_storage = dynamic_cast<UsersConfigAccessStorage &>(getStorageByIndex(USERS_CONFIG_ACCESS_STORAGE_INDEX));
|
||||||
@ -163,4 +171,9 @@ std::shared_ptr<const SettingsChanges> AccessControlManager::getProfileSettings(
|
|||||||
return settings_profiles_cache->getProfileSettings(profile_name);
|
return settings_profiles_cache->getProfileSettings(profile_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const ExternalAuthenticators & AccessControlManager::getExternalAuthenticators() const
|
||||||
|
{
|
||||||
|
return *external_authenticators;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -37,6 +37,7 @@ class EnabledSettings;
|
|||||||
class SettingsProfilesCache;
|
class SettingsProfilesCache;
|
||||||
class SettingsProfileElements;
|
class SettingsProfileElements;
|
||||||
class ClientInfo;
|
class ClientInfo;
|
||||||
|
class ExternalAuthenticators;
|
||||||
struct Settings;
|
struct Settings;
|
||||||
|
|
||||||
|
|
||||||
@ -48,6 +49,7 @@ public:
|
|||||||
~AccessControlManager();
|
~AccessControlManager();
|
||||||
|
|
||||||
void setLocalDirectory(const String & directory);
|
void setLocalDirectory(const String & directory);
|
||||||
|
void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config);
|
||||||
void setUsersConfig(const Poco::Util::AbstractConfiguration & users_config);
|
void setUsersConfig(const Poco::Util::AbstractConfiguration & users_config);
|
||||||
void setDefaultProfileName(const String & default_profile_name);
|
void setDefaultProfileName(const String & default_profile_name);
|
||||||
|
|
||||||
@ -85,6 +87,8 @@ public:
|
|||||||
|
|
||||||
std::shared_ptr<const SettingsChanges> getProfileSettings(const String & profile_name) const;
|
std::shared_ptr<const SettingsChanges> getProfileSettings(const String & profile_name) const;
|
||||||
|
|
||||||
|
const ExternalAuthenticators & getExternalAuthenticators() const;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
class ContextAccessCache;
|
class ContextAccessCache;
|
||||||
std::unique_ptr<ContextAccessCache> context_access_cache;
|
std::unique_ptr<ContextAccessCache> context_access_cache;
|
||||||
@ -92,6 +96,7 @@ private:
|
|||||||
std::unique_ptr<RowPolicyCache> row_policy_cache;
|
std::unique_ptr<RowPolicyCache> row_policy_cache;
|
||||||
std::unique_ptr<QuotaCache> quota_cache;
|
std::unique_ptr<QuotaCache> quota_cache;
|
||||||
std::unique_ptr<SettingsProfilesCache> settings_profiles_cache;
|
std::unique_ptr<SettingsProfilesCache> settings_profiles_cache;
|
||||||
|
std::unique_ptr<ExternalAuthenticators> external_authenticators;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -1,4 +1,6 @@
#include <Access/Authentication.h>
#include <Access/ExternalAuthenticators.h>
#include <Access/LDAPClient.h>
#include <Common/Exception.h>
#include <Poco/SHA1Engine.h>

@ -37,6 +39,9 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const
        case DOUBLE_SHA1_PASSWORD:
            return password_hash;

        case LDAP_SERVER:
            throw Exception("Cannot get password double SHA1 for user with 'LDAP_SERVER' authentication.", ErrorCodes::BAD_ARGUMENTS);

        case MAX_TYPE:
            break;
    }

@ -44,7 +49,7 @@ Authentication::Digest Authentication::getPasswordDoubleSHA1() const
}


bool Authentication::isCorrectPassword(const String & password_) const
bool Authentication::isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const
{
    switch (type)
    {

@ -75,6 +80,16 @@ bool Authentication::isCorrectPassword(const String & password_) const
            return encodeSHA1(first_sha1) == password_hash;
        }

        case LDAP_SERVER:
        {
            auto ldap_server_params = external_authenticators.getLDAPServerParams(server_name);
            ldap_server_params.user = user_;
            ldap_server_params.password = password_;

            LDAPSimpleAuthClient ldap_client(ldap_server_params);
            return ldap_client.check();
        }

        case MAX_TYPE:
            break;
    }
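To make the new `isCorrectPassword()` signature concrete, here is a hedged usage sketch. It is not part of the patch, assumes the patched headers and that `Authentication` can be constructed with an explicit `Type`, and uses a placeholder server name.

```cpp
#include <Access/Authentication.h>
#include <Access/ExternalAuthenticators.h>

// Check a password for a user whose authentication is delegated to a preconfigured LDAP server.
bool checkLDAPPassword(const DB::ExternalAuthenticators & external_authenticators,
                       const DB::String & user_name, const DB::String & password)
{
    DB::Authentication authentication(DB::Authentication::LDAP_SERVER);  // assumed Type-taking constructor
    authentication.setServerName("my_ldap_server");                      // must match an entry in <ldap_servers>

    /// For LDAP_SERVER this binds to the LDAP server as the user on every call;
    /// for the hash-based types the extra arguments are ignored.
    return authentication.isCorrectPassword(password, user_name, external_authenticators);
}
```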
@ -18,6 +18,7 @@ namespace ErrorCodes
|
|||||||
extern const int NOT_IMPLEMENTED;
|
extern const int NOT_IMPLEMENTED;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
class ExternalAuthenticators;
|
||||||
|
|
||||||
/// Authentication type and encrypted password for checking when an user logins.
|
/// Authentication type and encrypted password for checking when an user logins.
|
||||||
class Authentication
|
class Authentication
|
||||||
@ -38,6 +39,9 @@ public:
|
|||||||
/// This kind of hash is used by the `mysql_native_password` authentication plugin.
|
/// This kind of hash is used by the `mysql_native_password` authentication plugin.
|
||||||
DOUBLE_SHA1_PASSWORD,
|
DOUBLE_SHA1_PASSWORD,
|
||||||
|
|
||||||
|
/// Password is checked by a [remote] LDAP server. Connection will be made at each authentication attempt.
|
||||||
|
LDAP_SERVER,
|
||||||
|
|
||||||
MAX_TYPE,
|
MAX_TYPE,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -78,8 +82,14 @@ public:
|
|||||||
/// Allowed to use for Type::NO_PASSWORD, Type::PLAINTEXT_PASSWORD, Type::DOUBLE_SHA1_PASSWORD.
|
/// Allowed to use for Type::NO_PASSWORD, Type::PLAINTEXT_PASSWORD, Type::DOUBLE_SHA1_PASSWORD.
|
||||||
Digest getPasswordDoubleSHA1() const;
|
Digest getPasswordDoubleSHA1() const;
|
||||||
|
|
||||||
|
/// Sets an external authentication server name.
|
||||||
|
/// When authentication type is LDAP_SERVER, server name is expected to be the name of a preconfigured LDAP server.
|
||||||
|
const String & getServerName() const;
|
||||||
|
void setServerName(const String & server_name_);
|
||||||
|
|
||||||
/// Checks if the provided password is correct. Returns false if not.
|
/// Checks if the provided password is correct. Returns false if not.
|
||||||
bool isCorrectPassword(const String & password) const;
|
/// User name and external authenticators' info are used only by some specific authentication type (e.g., LDAP_SERVER).
|
||||||
|
bool isCorrectPassword(const String & password_, const String & user_, const ExternalAuthenticators & external_authenticators) const;
|
||||||
|
|
||||||
friend bool operator ==(const Authentication & lhs, const Authentication & rhs) { return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash); }
|
friend bool operator ==(const Authentication & lhs, const Authentication & rhs) { return (lhs.type == rhs.type) && (lhs.password_hash == rhs.password_hash); }
|
||||||
friend bool operator !=(const Authentication & lhs, const Authentication & rhs) { return !(lhs == rhs); }
|
friend bool operator !=(const Authentication & lhs, const Authentication & rhs) { return !(lhs == rhs); }
|
||||||
@ -93,6 +103,7 @@ private:
|
|||||||
|
|
||||||
Type type = Type::NO_PASSWORD;
|
Type type = Type::NO_PASSWORD;
|
||||||
Digest password_hash;
|
Digest password_hash;
|
||||||
|
String server_name;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -127,6 +138,11 @@ inline const Authentication::TypeInfo & Authentication::TypeInfo::get(Type type_
|
|||||||
static const auto info = make_info("DOUBLE_SHA1_PASSWORD");
|
static const auto info = make_info("DOUBLE_SHA1_PASSWORD");
|
||||||
return info;
|
return info;
|
||||||
}
|
}
|
||||||
|
case LDAP_SERVER:
|
||||||
|
{
|
||||||
|
static const auto info = make_info("LDAP_SERVER");
|
||||||
|
return info;
|
||||||
|
}
|
||||||
case MAX_TYPE: break;
|
case MAX_TYPE: break;
|
||||||
}
|
}
|
||||||
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
|
throw Exception("Unknown authentication type: " + std::to_string(static_cast<int>(type_)), ErrorCodes::LOGICAL_ERROR);
|
||||||
@ -176,6 +192,9 @@ inline void Authentication::setPassword(const String & password_)
|
|||||||
case DOUBLE_SHA1_PASSWORD:
|
case DOUBLE_SHA1_PASSWORD:
|
||||||
return setPasswordHashBinary(encodeDoubleSHA1(password_));
|
return setPasswordHashBinary(encodeDoubleSHA1(password_));
|
||||||
|
|
||||||
|
case LDAP_SERVER:
|
||||||
|
throw Exception("Cannot specify password for the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR);
|
||||||
|
|
||||||
case MAX_TYPE: break;
|
case MAX_TYPE: break;
|
||||||
}
|
}
|
||||||
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
throw Exception("setPassword(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||||
@ -200,6 +219,8 @@ inline void Authentication::setPasswordHashHex(const String & hash)
|
|||||||
|
|
||||||
inline String Authentication::getPasswordHashHex() const
|
inline String Authentication::getPasswordHashHex() const
|
||||||
{
|
{
|
||||||
|
if (type == LDAP_SERVER)
|
||||||
|
throw Exception("Cannot get password of a user with the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR);
|
||||||
String hex;
|
String hex;
|
||||||
hex.resize(password_hash.size() * 2);
|
hex.resize(password_hash.size() * 2);
|
||||||
boost::algorithm::hex(password_hash.begin(), password_hash.end(), hex.data());
|
boost::algorithm::hex(password_hash.begin(), password_hash.end(), hex.data());
|
||||||
@ -242,9 +263,22 @@ inline void Authentication::setPasswordHashBinary(const Digest & hash)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case LDAP_SERVER:
|
||||||
|
throw Exception("Cannot specify password for the 'LDAP_SERVER' authentication type", ErrorCodes::LOGICAL_ERROR);
|
||||||
|
|
||||||
case MAX_TYPE: break;
|
case MAX_TYPE: break;
|
||||||
}
|
}
|
||||||
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
throw Exception("setPasswordHashBinary(): authentication type " + toString(type) + " not supported", ErrorCodes::NOT_IMPLEMENTED);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
inline const String & Authentication::getServerName() const
|
||||||
|
{
|
||||||
|
return server_name;
|
||||||
|
}
|
||||||
|
|
||||||
|
inline void Authentication::setServerName(const String & server_name_)
|
||||||
|
{
|
||||||
|
server_name = server_name_;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -293,7 +293,7 @@ bool ContextAccess::isCorrectPassword(const String & password) const
    std::lock_guard lock{mutex};
    if (!user)
        return false;
    return user->authentication.isCorrectPassword(password);
    return user->authentication.isCorrectPassword(password, user_name, manager->getExternalAuthenticators());
}

bool ContextAccess::isClientHostAllowed() const
src/Access/ExternalAuthenticators.cpp (new file, 182 lines) @ -0,0 +1,182 @@
|
|||||||
|
#include <Access/ExternalAuthenticators.h>
|
||||||
|
#include <Common/Exception.h>
|
||||||
|
#include <Common/quoteString.h>
|
||||||
|
#include <Poco/Util/AbstractConfiguration.h>
|
||||||
|
#include <boost/algorithm/string/case_conv.hpp>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int BAD_ARGUMENTS;
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
|
||||||
|
auto parseLDAPServer(const Poco::Util::AbstractConfiguration & config, const String & ldap_server_name)
|
||||||
|
{
|
||||||
|
if (ldap_server_name.empty())
|
||||||
|
throw Exception("LDAP server name cannot be empty", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
LDAPServerParams params;
|
||||||
|
|
||||||
|
const String ldap_server_config = "ldap_servers." + ldap_server_name;
|
||||||
|
|
||||||
|
const bool has_host = config.has(ldap_server_config + ".host");
|
||||||
|
const bool has_port = config.has(ldap_server_config + ".port");
|
||||||
|
const bool has_auth_dn_prefix = config.has(ldap_server_config + ".auth_dn_prefix");
|
||||||
|
const bool has_auth_dn_suffix = config.has(ldap_server_config + ".auth_dn_suffix");
|
||||||
|
const bool has_enable_tls = config.has(ldap_server_config + ".enable_tls");
|
||||||
|
const bool has_tls_minimum_protocol_version = config.has(ldap_server_config + ".tls_minimum_protocol_version");
|
||||||
|
const bool has_tls_require_cert = config.has(ldap_server_config + ".tls_require_cert");
|
||||||
|
const bool has_tls_cert_file = config.has(ldap_server_config + ".tls_cert_file");
|
||||||
|
const bool has_tls_key_file = config.has(ldap_server_config + ".tls_key_file");
|
||||||
|
const bool has_tls_ca_cert_file = config.has(ldap_server_config + ".tls_ca_cert_file");
|
||||||
|
const bool has_tls_ca_cert_dir = config.has(ldap_server_config + ".tls_ca_cert_dir");
|
||||||
|
const bool has_tls_cipher_suite = config.has(ldap_server_config + ".tls_cipher_suite");
|
||||||
|
|
||||||
|
if (!has_host)
|
||||||
|
throw Exception("Missing 'host' entry", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
params.host = config.getString(ldap_server_config + ".host");
|
||||||
|
|
||||||
|
if (params.host.empty())
|
||||||
|
throw Exception("Empty 'host' entry", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
if (has_auth_dn_prefix)
|
||||||
|
params.auth_dn_prefix = config.getString(ldap_server_config + ".auth_dn_prefix");
|
||||||
|
|
||||||
|
if (has_auth_dn_suffix)
|
||||||
|
params.auth_dn_suffix = config.getString(ldap_server_config + ".auth_dn_suffix");
|
||||||
|
|
||||||
|
if (has_enable_tls)
|
||||||
|
{
|
||||||
|
String enable_tls_lc_str = config.getString(ldap_server_config + ".enable_tls");
|
||||||
|
boost::to_lower(enable_tls_lc_str);
|
||||||
|
|
||||||
|
if (enable_tls_lc_str == "starttls")
|
||||||
|
params.enable_tls = LDAPServerParams::TLSEnable::YES_STARTTLS;
|
||||||
|
else if (config.getBool(ldap_server_config + ".enable_tls"))
|
||||||
|
params.enable_tls = LDAPServerParams::TLSEnable::YES;
|
||||||
|
else
|
||||||
|
params.enable_tls = LDAPServerParams::TLSEnable::NO;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (has_tls_minimum_protocol_version)
|
||||||
|
{
|
||||||
|
String tls_minimum_protocol_version_lc_str = config.getString(ldap_server_config + ".tls_minimum_protocol_version");
|
||||||
|
boost::to_lower(tls_minimum_protocol_version_lc_str);
|
||||||
|
|
||||||
|
if (tls_minimum_protocol_version_lc_str == "ssl2")
|
||||||
|
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::SSL2;
|
||||||
|
else if (tls_minimum_protocol_version_lc_str == "ssl3")
|
||||||
|
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::SSL3;
|
||||||
|
else if (tls_minimum_protocol_version_lc_str == "tls1.0")
|
||||||
|
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_0;
|
||||||
|
else if (tls_minimum_protocol_version_lc_str == "tls1.1")
|
||||||
|
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_1;
|
||||||
|
else if (tls_minimum_protocol_version_lc_str == "tls1.2")
|
||||||
|
params.tls_minimum_protocol_version = LDAPServerParams::TLSProtocolVersion::TLS1_2;
|
||||||
|
else
|
||||||
|
throw Exception("Bad value for 'tls_minimum_protocol_version' entry, allowed values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2'", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (has_tls_require_cert)
|
||||||
|
{
|
||||||
|
String tls_require_cert_lc_str = config.getString(ldap_server_config + ".tls_require_cert");
|
||||||
|
boost::to_lower(tls_require_cert_lc_str);
|
||||||
|
|
||||||
|
if (tls_require_cert_lc_str == "never")
|
||||||
|
params.tls_require_cert = LDAPServerParams::TLSRequireCert::NEVER;
|
||||||
|
else if (tls_require_cert_lc_str == "allow")
|
||||||
|
params.tls_require_cert = LDAPServerParams::TLSRequireCert::ALLOW;
|
||||||
|
else if (tls_require_cert_lc_str == "try")
|
||||||
|
params.tls_require_cert = LDAPServerParams::TLSRequireCert::TRY;
|
||||||
|
else if (tls_require_cert_lc_str == "demand")
|
||||||
|
params.tls_require_cert = LDAPServerParams::TLSRequireCert::DEMAND;
|
||||||
|
else
|
||||||
|
throw Exception("Bad value for 'tls_require_cert' entry, allowed values are: 'never', 'allow', 'try', 'demand'", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (has_tls_cert_file)
|
||||||
|
params.tls_cert_file = config.getString(ldap_server_config + ".tls_cert_file");
|
||||||
|
|
||||||
|
if (has_tls_key_file)
|
||||||
|
params.tls_key_file = config.getString(ldap_server_config + ".tls_key_file");
|
||||||
|
|
||||||
|
if (has_tls_ca_cert_file)
|
||||||
|
params.tls_ca_cert_file = config.getString(ldap_server_config + ".tls_ca_cert_file");
|
||||||
|
|
||||||
|
if (has_tls_ca_cert_dir)
|
||||||
|
params.tls_ca_cert_dir = config.getString(ldap_server_config + ".tls_ca_cert_dir");
|
||||||
|
|
||||||
|
if (has_tls_cipher_suite)
|
||||||
|
params.tls_cipher_suite = config.getString(ldap_server_config + ".tls_cipher_suite");
|
||||||
|
|
||||||
|
if (has_port)
|
||||||
|
{
|
||||||
|
const auto port = config.getInt64(ldap_server_config + ".port");
|
||||||
|
if (port < 0 || port > 65535)
|
||||||
|
throw Exception("Bad value for 'port' entry", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
params.port = port;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
params.port = (params.enable_tls == LDAPServerParams::TLSEnable::YES ? 636 : 389);
|
||||||
|
|
||||||
|
return params;
|
||||||
|
}
|
||||||
|
|
||||||
|
void parseAndAddLDAPServers(ExternalAuthenticators & external_authenticators, const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||||
|
{
|
||||||
|
Poco::Util::AbstractConfiguration::Keys ldap_server_names;
|
||||||
|
config.keys("ldap_servers", ldap_server_names);
|
||||||
|
|
||||||
|
for (const auto & ldap_server_name : ldap_server_names)
|
||||||
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
external_authenticators.setLDAPServerParams(ldap_server_name, parseLDAPServer(config, ldap_server_name));
|
||||||
|
}
|
||||||
|
catch (...)
|
||||||
|
{
|
||||||
|
tryLogCurrentException(log, "Could not parse LDAP server " + backQuote(ldap_server_name));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
void ExternalAuthenticators::reset()
|
||||||
|
{
|
||||||
|
std::scoped_lock lock(mutex);
|
||||||
|
ldap_server_params.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
void ExternalAuthenticators::setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log)
|
||||||
|
{
|
||||||
|
std::scoped_lock lock(mutex);
|
||||||
|
reset();
|
||||||
|
parseAndAddLDAPServers(*this, config, log);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ExternalAuthenticators::setLDAPServerParams(const String & server, const LDAPServerParams & params)
|
||||||
|
{
|
||||||
|
std::scoped_lock lock(mutex);
|
||||||
|
ldap_server_params.erase(server);
|
||||||
|
ldap_server_params[server] = params;
|
||||||
|
}
|
||||||
|
|
||||||
|
LDAPServerParams ExternalAuthenticators::getLDAPServerParams(const String & server) const
|
||||||
|
{
|
||||||
|
std::scoped_lock lock(mutex);
|
||||||
|
auto it = ldap_server_params.find(server);
|
||||||
|
if (it == ldap_server_params.end())
|
||||||
|
throw Exception("LDAP server '" + server + "' is not configured", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
return it->second;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
src/Access/ExternalAuthenticators.h (new file, 39 lines) @ -0,0 +1,39 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Access/LDAPParams.h>
|
||||||
|
#include <Core/Types.h>
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <memory>
|
||||||
|
#include <mutex>
|
||||||
|
|
||||||
|
|
||||||
|
namespace Poco
|
||||||
|
{
|
||||||
|
class Logger;
|
||||||
|
|
||||||
|
namespace Util
|
||||||
|
{
|
||||||
|
class AbstractConfiguration;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
class ExternalAuthenticators
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
void reset();
|
||||||
|
void setConfig(const Poco::Util::AbstractConfiguration & config, Poco::Logger * log);
|
||||||
|
|
||||||
|
void setLDAPServerParams(const String & server, const LDAPServerParams & params);
|
||||||
|
LDAPServerParams getLDAPServerParams(const String & server) const;
|
||||||
|
|
||||||
|
private:
|
||||||
|
mutable std::recursive_mutex mutex;
|
||||||
|
std::map<String, LDAPServerParams> ldap_server_params;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
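A short usage sketch for this class follows; it is not part of the patch, and the logger name, config path and server name are placeholders.

```cpp
#include <Access/ExternalAuthenticators.h>
#include <Poco/Util/XMLConfiguration.h>
#include <Poco/AutoPtr.h>
#include <Poco/Logger.h>
#include <string>

// Parse <ldap_servers> from a config file, then look one server up by name.
void loadLDAPServers(DB::ExternalAuthenticators & external_authenticators, const std::string & config_path)
{
    Poco::AutoPtr<Poco::Util::XMLConfiguration> config(new Poco::Util::XMLConfiguration(config_path));

    /// Re-parses the whole <ldap_servers> section; malformed entries are logged and skipped.
    external_authenticators.setConfig(*config, &Poco::Logger::get("ExternalAuthenticators"));

    /// Throws BAD_ARGUMENTS if no server with this name was successfully parsed.
    const auto params = external_authenticators.getLDAPServerParams("my_ldap_server");
    (void)params;
}
```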
|
src/Access/LDAPClient.cpp (new file, 331 lines) @ -0,0 +1,331 @@
|
|||||||
|
#include <Access/LDAPClient.h>
|
||||||
|
#include <Common/Exception.h>
|
||||||
|
#include <ext/scope_guard.h>
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
|
||||||
|
#include <sys/time.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int BAD_ARGUMENTS;
|
||||||
|
extern const int FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME;
|
||||||
|
extern const int LDAP_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
LDAPClient::LDAPClient(const LDAPServerParams & params_)
|
||||||
|
: params(params_)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
LDAPClient::~LDAPClient()
|
||||||
|
{
|
||||||
|
closeConnection();
|
||||||
|
}
|
||||||
|
|
||||||
|
void LDAPClient::openConnection()
|
||||||
|
{
|
||||||
|
const bool graceful_bind_failure = false;
|
||||||
|
diag(openConnection(graceful_bind_failure));
|
||||||
|
}
|
||||||
|
|
||||||
|
#if USE_LDAP
|
||||||
|
|
||||||
|
namespace
|
||||||
|
{
|
||||||
|
auto escapeForLDAP(const String & src)
|
||||||
|
{
|
||||||
|
String dest;
|
||||||
|
dest.reserve(src.size() * 2);
|
||||||
|
|
||||||
|
for (auto ch : src)
|
||||||
|
{
|
||||||
|
switch (ch)
|
||||||
|
{
|
||||||
|
case ',':
|
||||||
|
case '\\':
|
||||||
|
case '#':
|
||||||
|
case '+':
|
||||||
|
case '<':
|
||||||
|
case '>':
|
||||||
|
case ';':
|
||||||
|
case '"':
|
||||||
|
case '=':
|
||||||
|
dest += '\\';
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
dest += ch;
|
||||||
|
}
|
||||||
|
|
||||||
|
return dest;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void LDAPClient::diag(const int rc)
|
||||||
|
{
|
||||||
|
if (rc != LDAP_SUCCESS)
|
||||||
|
{
|
||||||
|
String text;
|
||||||
|
const char * raw_err_str = ldap_err2string(rc);
|
||||||
|
|
||||||
|
if (raw_err_str)
|
||||||
|
text = raw_err_str;
|
||||||
|
|
||||||
|
if (handle)
|
||||||
|
{
|
||||||
|
String message;
|
||||||
|
char * raw_message = nullptr;
|
||||||
|
ldap_get_option(handle, LDAP_OPT_DIAGNOSTIC_MESSAGE, &raw_message);
|
||||||
|
|
||||||
|
if (raw_message)
|
||||||
|
{
|
||||||
|
message = raw_message;
|
||||||
|
ldap_memfree(raw_message);
|
||||||
|
raw_message = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!message.empty())
|
||||||
|
{
|
||||||
|
if (!text.empty())
|
||||||
|
text += ": ";
|
||||||
|
text += message;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
throw Exception(text, ErrorCodes::LDAP_ERROR);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int LDAPClient::openConnection(const bool graceful_bind_failure)
|
||||||
|
{
|
||||||
|
closeConnection();
|
||||||
|
|
||||||
|
{
|
||||||
|
LDAPURLDesc url;
|
||||||
|
std::memset(&url, 0, sizeof(url));
|
||||||
|
|
||||||
|
url.lud_scheme = const_cast<char *>(params.enable_tls == LDAPServerParams::TLSEnable::YES ? "ldaps" : "ldap");
|
||||||
|
url.lud_host = const_cast<char *>(params.host.c_str());
|
||||||
|
url.lud_port = params.port;
|
||||||
|
url.lud_scope = LDAP_SCOPE_DEFAULT;
|
||||||
|
|
||||||
|
auto * uri = ldap_url_desc2str(&url);
|
||||||
|
if (!uri)
|
||||||
|
throw Exception("ldap_url_desc2str() failed", ErrorCodes::LDAP_ERROR);
|
||||||
|
|
||||||
|
SCOPE_EXIT({ ldap_memfree(uri); });
|
||||||
|
|
||||||
|
diag(ldap_initialize(&handle, uri));
|
||||||
|
if (!handle)
|
||||||
|
throw Exception("ldap_initialize() failed", ErrorCodes::LDAP_ERROR);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
int value = 0;
|
||||||
|
switch (params.protocol_version)
|
||||||
|
{
|
||||||
|
case LDAPServerParams::ProtocolVersion::V2: value = LDAP_VERSION2; break;
|
||||||
|
case LDAPServerParams::ProtocolVersion::V3: value = LDAP_VERSION3; break;
|
||||||
|
}
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_PROTOCOL_VERSION, &value));
|
||||||
|
}
|
||||||
|
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_RESTART, LDAP_OPT_ON));
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_KEEPCONN
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_KEEPCONN, LDAP_OPT_ON));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_TIMEOUT
|
||||||
|
{
|
||||||
|
::timeval operation_timeout;
|
||||||
|
operation_timeout.tv_sec = params.operation_timeout.count();
|
||||||
|
operation_timeout.tv_usec = 0;
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_TIMEOUT, &operation_timeout));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_NETWORK_TIMEOUT
|
||||||
|
{
|
||||||
|
::timeval network_timeout;
|
||||||
|
network_timeout.tv_sec = params.network_timeout.count();
|
||||||
|
network_timeout.tv_usec = 0;
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_NETWORK_TIMEOUT, &network_timeout));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
{
|
||||||
|
const int search_timeout = params.search_timeout.count();
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_TIMELIMIT, &search_timeout));
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
const int size_limit = params.search_limit;
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_SIZELIMIT, &size_limit));
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_PROTOCOL_MIN
|
||||||
|
{
|
||||||
|
int value = 0;
|
||||||
|
switch (params.tls_minimum_protocol_version)
|
||||||
|
{
|
||||||
|
case LDAPServerParams::TLSProtocolVersion::SSL2: value = LDAP_OPT_X_TLS_PROTOCOL_SSL2; break;
|
||||||
|
case LDAPServerParams::TLSProtocolVersion::SSL3: value = LDAP_OPT_X_TLS_PROTOCOL_SSL3; break;
|
||||||
|
case LDAPServerParams::TLSProtocolVersion::TLS1_0: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_0; break;
|
||||||
|
case LDAPServerParams::TLSProtocolVersion::TLS1_1: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_1; break;
|
||||||
|
case LDAPServerParams::TLSProtocolVersion::TLS1_2: value = LDAP_OPT_X_TLS_PROTOCOL_TLS1_2; break;
|
||||||
|
}
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_PROTOCOL_MIN, &value));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_REQUIRE_CERT
|
||||||
|
{
|
||||||
|
int value = 0;
|
||||||
|
switch (params.tls_require_cert)
|
||||||
|
{
|
||||||
|
case LDAPServerParams::TLSRequireCert::NEVER: value = LDAP_OPT_X_TLS_NEVER; break;
|
||||||
|
case LDAPServerParams::TLSRequireCert::ALLOW: value = LDAP_OPT_X_TLS_ALLOW; break;
|
||||||
|
case LDAPServerParams::TLSRequireCert::TRY: value = LDAP_OPT_X_TLS_TRY; break;
|
||||||
|
case LDAPServerParams::TLSRequireCert::DEMAND: value = LDAP_OPT_X_TLS_DEMAND; break;
|
||||||
|
}
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_REQUIRE_CERT, &value));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_CERTFILE
|
||||||
|
if (!params.tls_cert_file.empty())
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CERTFILE, params.tls_cert_file.c_str()));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_KEYFILE
|
||||||
|
if (!params.tls_key_file.empty())
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_KEYFILE, params.tls_key_file.c_str()));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_CACERTFILE
|
||||||
|
if (!params.tls_ca_cert_file.empty())
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTFILE, params.tls_ca_cert_file.c_str()));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_CACERTDIR
|
||||||
|
if (!params.tls_ca_cert_dir.empty())
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CACERTDIR, params.tls_ca_cert_dir.c_str()));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_CIPHER_SUITE
|
||||||
|
if (!params.tls_cipher_suite.empty())
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_CIPHER_SUITE, params.tls_cipher_suite.c_str()));
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef LDAP_OPT_X_TLS_NEWCTX
|
||||||
|
{
|
||||||
|
const int i_am_a_server = 0;
|
||||||
|
diag(ldap_set_option(handle, LDAP_OPT_X_TLS_NEWCTX, &i_am_a_server));
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
if (params.enable_tls == LDAPServerParams::TLSEnable::YES_STARTTLS)
|
||||||
|
diag(ldap_start_tls_s(handle, nullptr, nullptr));
|
||||||
|
|
||||||
|
int rc = LDAP_OTHER;
|
||||||
|
|
||||||
|
switch (params.sasl_mechanism)
|
||||||
|
{
|
||||||
|
case LDAPServerParams::SASLMechanism::SIMPLE:
|
||||||
|
{
|
||||||
|
const String dn = params.auth_dn_prefix + escapeForLDAP(params.user) + params.auth_dn_suffix;
|
||||||
|
|
||||||
|
::berval cred;
|
||||||
|
cred.bv_val = const_cast<char *>(params.password.c_str());
|
||||||
|
cred.bv_len = params.password.size();
|
||||||
|
|
||||||
|
rc = ldap_sasl_bind_s(handle, dn.c_str(), LDAP_SASL_SIMPLE, &cred, nullptr, nullptr, nullptr);
|
||||||
|
|
||||||
|
if (!graceful_bind_failure)
|
||||||
|
diag(rc);
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return rc;
|
||||||
|
}
|
||||||
|
|
||||||
|
void LDAPClient::closeConnection() noexcept
|
||||||
|
{
|
||||||
|
if (!handle)
|
||||||
|
return;
|
||||||
|
|
||||||
|
ldap_unbind_ext_s(handle, nullptr, nullptr);
|
||||||
|
handle = nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool LDAPSimpleAuthClient::check()
|
||||||
|
{
|
||||||
|
if (params.user.empty())
|
||||||
|
throw Exception("LDAP authentication of a user with an empty name is not allowed", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
if (params.password.empty())
|
||||||
|
return false; // Silently reject authentication attempt if the password is empty as if it didn't match.
|
||||||
|
|
||||||
|
SCOPE_EXIT({ closeConnection(); });
|
||||||
|
|
||||||
|
const bool graceful_bind_failure = true;
|
||||||
|
const auto rc = openConnection(graceful_bind_failure);
|
||||||
|
|
||||||
|
bool result = false;
|
||||||
|
|
||||||
|
switch (rc)
|
||||||
|
{
|
||||||
|
case LDAP_SUCCESS:
|
||||||
|
{
|
||||||
|
result = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case LDAP_INVALID_CREDENTIALS:
|
||||||
|
{
|
||||||
|
result = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
{
|
||||||
|
result = false;
|
||||||
|
diag(rc);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
#else // USE_LDAP
|
||||||
|
|
||||||
|
void LDAPClient::diag(const int)
|
||||||
|
{
|
||||||
|
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
|
||||||
|
}
|
||||||
|
|
||||||
|
int LDAPClient::openConnection(const bool)
|
||||||
|
{
|
||||||
|
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
|
||||||
|
}
|
||||||
|
|
||||||
|
void LDAPClient::closeConnection() noexcept
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
bool LDAPSimpleAuthClient::check()
|
||||||
|
{
|
||||||
|
throw Exception("ClickHouse was built without LDAP support", ErrorCodes::FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif // USE_LDAP
|
||||||
|
|
||||||
|
}
|
src/Access/LDAPClient.h (new file, 55 lines) @ -0,0 +1,55 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#if !defined(ARCADIA_BUILD)
|
||||||
|
# include "config_core.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include <Access/LDAPParams.h>
|
||||||
|
#include <Core/Types.h>
|
||||||
|
|
||||||
|
#if USE_LDAP
|
||||||
|
# include <ldap.h>
|
||||||
|
# define MAYBE_NORETURN
|
||||||
|
#else
|
||||||
|
# define MAYBE_NORETURN [[noreturn]]
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
class LDAPClient
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
explicit LDAPClient(const LDAPServerParams & params_);
|
||||||
|
~LDAPClient();
|
||||||
|
|
||||||
|
LDAPClient(const LDAPClient &) = delete;
|
||||||
|
LDAPClient(LDAPClient &&) = delete;
|
||||||
|
LDAPClient & operator= (const LDAPClient &) = delete;
|
||||||
|
LDAPClient & operator= (LDAPClient &&) = delete;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
MAYBE_NORETURN void diag(const int rc);
|
||||||
|
MAYBE_NORETURN void openConnection();
|
||||||
|
int openConnection(const bool graceful_bind_failure = false);
|
||||||
|
void closeConnection() noexcept;
|
||||||
|
|
||||||
|
protected:
|
||||||
|
const LDAPServerParams params;
|
||||||
|
#if USE_LDAP
|
||||||
|
LDAP * handle = nullptr;
|
||||||
|
#endif
|
||||||
|
};
|
||||||
|
|
||||||
|
class LDAPSimpleAuthClient
|
||||||
|
: private LDAPClient
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
using LDAPClient::LDAPClient;
|
||||||
|
bool check();
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
#undef MAYBE_NORETURN
|
76
src/Access/LDAPParams.h
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <Core/Types.h>
|
||||||
|
|
||||||
|
#include <chrono>
|
||||||
|
|
||||||
|
|
||||||
|
namespace DB
|
||||||
|
{
|
||||||
|
|
||||||
|
struct LDAPServerParams
|
||||||
|
{
|
||||||
|
enum class ProtocolVersion
|
||||||
|
{
|
||||||
|
V2,
|
||||||
|
V3
|
||||||
|
};
|
||||||
|
|
||||||
|
enum class TLSEnable
|
||||||
|
{
|
||||||
|
NO,
|
||||||
|
YES_STARTTLS,
|
||||||
|
YES
|
||||||
|
};
|
||||||
|
|
||||||
|
enum class TLSProtocolVersion
|
||||||
|
{
|
||||||
|
SSL2,
|
||||||
|
SSL3,
|
||||||
|
TLS1_0,
|
||||||
|
TLS1_1,
|
||||||
|
TLS1_2
|
||||||
|
};
|
||||||
|
|
||||||
|
enum class TLSRequireCert
|
||||||
|
{
|
||||||
|
NEVER,
|
||||||
|
ALLOW,
|
||||||
|
TRY,
|
||||||
|
DEMAND
|
||||||
|
};
|
||||||
|
|
||||||
|
enum class SASLMechanism
|
||||||
|
{
|
||||||
|
SIMPLE
|
||||||
|
};
|
||||||
|
|
||||||
|
ProtocolVersion protocol_version = ProtocolVersion::V3;
|
||||||
|
|
||||||
|
String host;
|
||||||
|
std::uint16_t port = 636;
|
||||||
|
|
||||||
|
TLSEnable enable_tls = TLSEnable::YES;
|
||||||
|
TLSProtocolVersion tls_minimum_protocol_version = TLSProtocolVersion::TLS1_2;
|
||||||
|
TLSRequireCert tls_require_cert = TLSRequireCert::DEMAND;
|
||||||
|
String tls_cert_file;
|
||||||
|
String tls_key_file;
|
||||||
|
String tls_ca_cert_file;
|
||||||
|
String tls_ca_cert_dir;
|
||||||
|
String tls_cipher_suite;
|
||||||
|
|
||||||
|
SASLMechanism sasl_mechanism = SASLMechanism::SIMPLE;
|
||||||
|
|
||||||
|
String auth_dn_prefix;
|
||||||
|
String auth_dn_suffix;
|
||||||
|
|
||||||
|
String user;
|
||||||
|
String password;
|
||||||
|
|
||||||
|
std::chrono::seconds operation_timeout{40};
|
||||||
|
std::chrono::seconds network_timeout{30};
|
||||||
|
std::chrono::seconds search_timeout{20};
|
||||||
|
std::uint32_t search_limit = 100;
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
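For illustration, a small sketch of how a bind DN is composed from the auth_dn_prefix / auth_dn_suffix fields above. The escaping function is a simplified stand-in for escapeForLDAP, whose implementation is not part of this hunk, and only handles the RFC 4514 special characters.

// Sketch of composing the bind DN the way the SIMPLE branch does:
// auth_dn_prefix + escaped user name + auth_dn_suffix.
// escapeForDN is a simplified, hypothetical stand-in for escapeForLDAP.
#include <string>
#include <iostream>

static std::string escapeForDN(const std::string & value)
{
    static const std::string special = ",\\#+<>;\"=";  // RFC 4514 special characters
    std::string result;
    for (char c : value)
    {
        if (special.find(c) != std::string::npos)
            result += '\\';
        result += c;
    }
    return result;
}

int main()
{
    std::string auth_dn_prefix = "uid=";
    std::string auth_dn_suffix = ",ou=users,dc=example,dc=com";
    std::string user = "j,smith";

    std::string dn = auth_dn_prefix + escapeForDN(user) + auth_dn_suffix;
    std::cout << dn << '\n';  // uid=j\,smith,ou=users,dc=example,dc=com
}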
@ -56,14 +56,15 @@ namespace
|
|||||||
bool has_password_plaintext = config.has(user_config + ".password");
|
bool has_password_plaintext = config.has(user_config + ".password");
|
||||||
bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex");
|
bool has_password_sha256_hex = config.has(user_config + ".password_sha256_hex");
|
||||||
bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex");
|
bool has_password_double_sha1_hex = config.has(user_config + ".password_double_sha1_hex");
|
||||||
|
bool has_ldap = config.has(user_config + ".ldap");
|
||||||
|
|
||||||
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex;
|
size_t num_password_fields = has_no_password + has_password_plaintext + has_password_sha256_hex + has_password_double_sha1_hex + has_ldap;
|
||||||
if (num_password_fields > 1)
|
if (num_password_fields > 1)
|
||||||
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password' are used to specify password for user " + user_name + ". Must be only one of them.",
|
throw Exception("More than one field of 'password', 'password_sha256_hex', 'password_double_sha1_hex', 'no_password', 'ldap' are used to specify password for user " + user_name + ". Must be only one of them.",
|
||||||
ErrorCodes::BAD_ARGUMENTS);
|
ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
if (num_password_fields < 1)
|
if (num_password_fields < 1)
|
||||||
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
|
throw Exception("Either 'password' or 'password_sha256_hex' or 'password_double_sha1_hex' or 'no_password' or 'ldap' must be specified for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
if (has_password_plaintext)
|
if (has_password_plaintext)
|
||||||
{
|
{
|
||||||
@ -80,6 +81,19 @@ namespace
|
|||||||
user->authentication = Authentication{Authentication::DOUBLE_SHA1_PASSWORD};
|
user->authentication = Authentication{Authentication::DOUBLE_SHA1_PASSWORD};
|
||||||
user->authentication.setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex"));
|
user->authentication.setPasswordHashHex(config.getString(user_config + ".password_double_sha1_hex"));
|
||||||
}
|
}
|
||||||
|
else if (has_ldap)
|
||||||
|
{
|
||||||
|
bool has_ldap_server = config.has(user_config + ".ldap.server");
|
||||||
|
if (!has_ldap_server)
|
||||||
|
throw Exception("Missing mandatory 'server' in 'ldap', with LDAP server name, for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
const auto ldap_server_name = config.getString(user_config + ".ldap.server");
|
||||||
|
if (ldap_server_name.empty())
|
||||||
|
throw Exception("LDAP server name cannot be empty for user " + user_name + ".", ErrorCodes::BAD_ARGUMENTS);
|
||||||
|
|
||||||
|
user->authentication = Authentication{Authentication::LDAP_SERVER};
|
||||||
|
user->authentication.setServerName(ldap_server_name);
|
||||||
|
}
|
||||||
|
|
||||||
const auto profile_name_config = user_config + ".profile";
|
const auto profile_name_config = user_config + ".profile";
|
||||||
if (config.has(profile_name_config))
|
if (config.has(profile_name_config))
|
||||||
|
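The num_password_fields check above relies on the usual bool-summation idiom: each presence flag converts to 0 or 1, so their sum is the number of configured authentication methods, and anything other than exactly one is rejected. A minimal sketch of the same validation:

// Sketch of enforcing "exactly one authentication method" by summing presence flags.
#include <cstddef>
#include <stdexcept>

static void validateAuthFields(bool has_no_password, bool has_plaintext,
                               bool has_sha256, bool has_double_sha1, bool has_ldap)
{
    size_t num_password_fields = has_no_password + has_plaintext + has_sha256 + has_double_sha1 + has_ldap;

    if (num_password_fields > 1)
        throw std::invalid_argument("More than one authentication field is set");
    if (num_password_fields < 1)
        throw std::invalid_argument("No authentication field is set");
}

int main()
{
    validateAuthFields(false, false, false, false, true);  /// OK: only 'ldap' is set.
}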
@ -17,9 +17,11 @@ SRCS(
|
|||||||
EnabledRolesInfo.cpp
|
EnabledRolesInfo.cpp
|
||||||
EnabledRowPolicies.cpp
|
EnabledRowPolicies.cpp
|
||||||
EnabledSettings.cpp
|
EnabledSettings.cpp
|
||||||
|
ExternalAuthenticators.cpp
|
||||||
GrantedRoles.cpp
|
GrantedRoles.cpp
|
||||||
IAccessEntity.cpp
|
IAccessEntity.cpp
|
||||||
IAccessStorage.cpp
|
IAccessStorage.cpp
|
||||||
|
LDAPClient.cpp
|
||||||
MemoryAccessStorage.cpp
|
MemoryAccessStorage.cpp
|
||||||
MultipleAccessStorage.cpp
|
MultipleAccessStorage.cpp
|
||||||
Quota.cpp
|
Quota.cpp
|
||||||
|
@ -20,6 +20,7 @@ template <typename T, typename Denominator>
|
|||||||
struct AggregateFunctionAvgData
|
struct AggregateFunctionAvgData
|
||||||
{
|
{
|
||||||
using NumeratorType = T;
|
using NumeratorType = T;
|
||||||
|
using DenominatorType = Denominator;
|
||||||
|
|
||||||
T numerator = 0;
|
T numerator = 0;
|
||||||
Denominator denominator = 0;
|
Denominator denominator = 0;
|
||||||
@ -73,13 +74,21 @@ public:
|
|||||||
void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override
|
void serialize(ConstAggregateDataPtr place, WriteBuffer & buf) const override
|
||||||
{
|
{
|
||||||
writeBinary(this->data(place).numerator, buf);
|
writeBinary(this->data(place).numerator, buf);
|
||||||
writeBinary(this->data(place).denominator, buf);
|
|
||||||
|
if constexpr (std::is_unsigned_v<typename Data::DenominatorType>)
|
||||||
|
writeVarUInt(this->data(place).denominator, buf);
|
||||||
|
else /// Floating point denominator type can be used
|
||||||
|
writeBinary(this->data(place).denominator, buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override
|
void deserialize(AggregateDataPtr place, ReadBuffer & buf, Arena *) const override
|
||||||
{
|
{
|
||||||
readBinary(this->data(place).numerator, buf);
|
readBinary(this->data(place).numerator, buf);
|
||||||
readBinary(this->data(place).denominator, buf);
|
|
||||||
|
if constexpr (std::is_unsigned_v<typename Data::DenominatorType>)
|
||||||
|
readVarUInt(this->data(place).denominator, buf);
|
||||||
|
else /// Floating point denominator type can be used
|
||||||
|
readBinary(this->data(place).denominator, buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
|
void insertResultInto(AggregateDataPtr place, IColumn & to, Arena *) const override
|
||||||
|
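The avg change above switches the serialization of the denominator on its type at compile time: unsigned (counter) denominators go through a variable-length integer, floating-point denominators stay as raw bytes. Below is a self-contained sketch of that `if constexpr` dispatch; the LEB128-style writer is an assumption standing in for ClickHouse's writeVarUInt, not a copy of its wire format.

// Sketch: branch serialization on the denominator type.
// writeVarUInt here is a generic LEB128-style encoder used only for illustration.
#include <cstdint>
#include <type_traits>
#include <vector>

static void writeVarUInt(uint64_t x, std::vector<char> & out)
{
    while (x >= 0x80)
    {
        out.push_back(static_cast<char>((x & 0x7F) | 0x80));
        x >>= 7;
    }
    out.push_back(static_cast<char>(x));
}

template <typename Denominator>
static void writeBinary(Denominator value, std::vector<char> & out)
{
    const char * bytes = reinterpret_cast<const char *>(&value);
    out.insert(out.end(), bytes, bytes + sizeof(value));
}

template <typename Denominator>
static void serializeDenominator(Denominator denominator, std::vector<char> & out)
{
    if constexpr (std::is_unsigned_v<Denominator>)
        writeVarUInt(denominator, out);   /// Small counts take 1-2 bytes instead of 8.
    else
        writeBinary(denominator, out);    /// Floating point denominator type can be used.
}

int main()
{
    std::vector<char> buf;
    serializeDenominator<uint64_t>(5, buf);   /// 1 byte
    serializeDenominator<double>(0.5, buf);   /// 8 bytes
    return buf.size() == 9 ? 0 : 1;
}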
@ -332,7 +332,7 @@ if (OPENSSL_CRYPTO_LIBRARY)
|
|||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
if (USE_LDAP)
|
if (USE_LDAP)
|
||||||
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${OPENLDAP_INCLUDE_DIR})
|
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${OPENLDAP_INCLUDE_DIRS})
|
||||||
dbms_target_link_libraries (PRIVATE ${OPENLDAP_LIBRARIES})
|
dbms_target_link_libraries (PRIVATE ${OPENLDAP_LIBRARIES})
|
||||||
endif ()
|
endif ()
|
||||||
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR})
|
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARSEHASH_INCLUDE_DIR})
|
||||||
@ -370,7 +370,9 @@ endif()
|
|||||||
|
|
||||||
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})
|
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})
|
||||||
|
|
||||||
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR})
|
if (USE_MSGPACK)
|
||||||
|
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${MSGPACK_INCLUDE_DIR})
|
||||||
|
endif()
|
||||||
|
|
||||||
if (USE_ORC)
|
if (USE_ORC)
|
||||||
dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
|
dbms_target_link_libraries(PUBLIC ${ORC_LIBRARIES})
|
||||||
|
@ -498,6 +498,7 @@ namespace ErrorCodes
|
|||||||
extern const int NOT_A_LEADER = 529;
|
extern const int NOT_A_LEADER = 529;
|
||||||
extern const int CANNOT_CONNECT_RABBITMQ = 530;
|
extern const int CANNOT_CONNECT_RABBITMQ = 530;
|
||||||
extern const int CANNOT_FSTAT = 531;
|
extern const int CANNOT_FSTAT = 531;
|
||||||
|
extern const int LDAP_ERROR = 532;
|
||||||
|
|
||||||
extern const int KEEPER_EXCEPTION = 999;
|
extern const int KEEPER_EXCEPTION = 999;
|
||||||
extern const int POCO_EXCEPTION = 1000;
|
extern const int POCO_EXCEPTION = 1000;
|
||||||
|
@ -12,6 +12,12 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int UNEXPECTED_END_OF_FILE;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
FileChecker::FileChecker(DiskPtr disk_, const String & file_info_path_) : disk(std::move(disk_))
|
FileChecker::FileChecker(DiskPtr disk_, const String & file_info_path_) : disk(std::move(disk_))
|
||||||
{
|
{
|
||||||
setPath(file_info_path_);
|
setPath(file_info_path_);
|
||||||
@ -24,19 +30,15 @@ void FileChecker::setPath(const String & file_info_path_)
|
|||||||
tmp_files_info_path = parentPath(files_info_path) + "tmp_" + fileName(files_info_path);
|
tmp_files_info_path = parentPath(files_info_path) + "tmp_" + fileName(files_info_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
void FileChecker::update(const String & file_path)
|
void FileChecker::update(const String & full_file_path)
|
||||||
{
|
{
|
||||||
initialize();
|
initialize();
|
||||||
updateImpl(file_path);
|
map[fileName(full_file_path)] = disk->getFileSize(full_file_path);
|
||||||
save();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void FileChecker::update(const Strings::const_iterator & begin, const Strings::const_iterator & end)
|
void FileChecker::setEmpty(const String & full_file_path)
|
||||||
{
|
{
|
||||||
initialize();
|
map[fileName(full_file_path)] = 0;
|
||||||
for (auto it = begin; it != end; ++it)
|
|
||||||
updateImpl(*it);
|
|
||||||
save();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
CheckResults FileChecker::check() const
|
CheckResults FileChecker::check() const
|
||||||
@ -73,6 +75,28 @@ CheckResults FileChecker::check() const
|
|||||||
return results;
|
return results;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void FileChecker::repair()
|
||||||
|
{
|
||||||
|
for (const auto & name_size : map)
|
||||||
|
{
|
||||||
|
const String & name = name_size.first;
|
||||||
|
size_t expected_size = name_size.second;
|
||||||
|
String path = parentPath(files_info_path) + name;
|
||||||
|
bool exists = disk->exists(path);
|
||||||
|
auto real_size = exists ? disk->getFileSize(path) : 0; /// No race condition assuming no one else is working with these files.
|
||||||
|
|
||||||
|
if (real_size < expected_size)
|
||||||
|
throw Exception(ErrorCodes::UNEXPECTED_END_OF_FILE, "Size of {} is less than expected. Size is {} but should be {}.",
|
||||||
|
path, real_size, expected_size);
|
||||||
|
|
||||||
|
if (real_size > expected_size)
|
||||||
|
{
|
||||||
|
LOG_WARNING(&Poco::Logger::get("FileChecker"), "Will truncate file {} that has size {} to size {}", path, real_size, expected_size);
|
||||||
|
disk->truncateFile(path, expected_size);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void FileChecker::initialize()
|
void FileChecker::initialize()
|
||||||
{
|
{
|
||||||
if (initialized)
|
if (initialized)
|
||||||
@ -82,11 +106,6 @@ void FileChecker::initialize()
|
|||||||
initialized = true;
|
initialized = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
void FileChecker::updateImpl(const String & file_path)
|
|
||||||
{
|
|
||||||
map[fileName(file_path)] = disk->getFileSize(file_path);
|
|
||||||
}
|
|
||||||
|
|
||||||
void FileChecker::save() const
|
void FileChecker::save() const
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
|
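The repair() method added above compares the recorded sizes against the files on disk: a file shorter than expected is fatal, while a longer file is truncated back, which rolls back a group of unfinished writes. A standalone sketch of the same logic with std::filesystem, assuming the recorded sizes live in a plain map standing in for sizes.json:

// Standalone sketch of the repair() logic: shorter-than-recorded files are fatal,
// longer ones are truncated back to the recorded size.
#include <cstdint>
#include <filesystem>
#include <map>
#include <stdexcept>
#include <string>

namespace fs = std::filesystem;

static void repair(const fs::path & dir, const std::map<std::string, uint64_t> & expected_sizes)
{
    for (const auto & [name, expected_size] : expected_sizes)
    {
        const fs::path path = dir / name;
        const uint64_t real_size = fs::exists(path) ? fs::file_size(path) : 0;

        if (real_size < expected_size)
            throw std::runtime_error("Size of " + path.string() + " is less than expected");

        if (real_size > expected_size)
            fs::resize_file(path, expected_size);  /// Truncate the unfinished tail.
    }
}

int main()
{
    repair(fs::temp_directory_path(), {});   /// No recorded files -> nothing to check.
}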
@ -14,19 +14,25 @@ class FileChecker
|
|||||||
public:
|
public:
|
||||||
FileChecker(DiskPtr disk_, const String & file_info_path_);
|
FileChecker(DiskPtr disk_, const String & file_info_path_);
|
||||||
void setPath(const String & file_info_path_);
|
void setPath(const String & file_info_path_);
|
||||||
void update(const String & file_path);
|
|
||||||
void update(const Strings::const_iterator & begin, const Strings::const_iterator & end);
|
void update(const String & full_file_path);
|
||||||
|
void setEmpty(const String & full_file_path);
|
||||||
|
void save() const;
|
||||||
|
|
||||||
/// Check the files whose parameters are specified in sizes.json
|
/// Check the files whose parameters are specified in sizes.json
|
||||||
CheckResults check() const;
|
CheckResults check() const;
|
||||||
|
|
||||||
|
/// Truncate files that have excessive size to the expected size.
|
||||||
|
/// Throw exception if the file size is less than expected.
|
||||||
|
/// The purpose of this function is to rollback a group of unfinished writes.
|
||||||
|
void repair();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/// File name -> size.
|
/// File name -> size.
|
||||||
using Map = std::map<String, UInt64>;
|
using Map = std::map<String, UInt64>;
|
||||||
|
|
||||||
void initialize();
|
void initialize();
|
||||||
void updateImpl(const String & file_path);
|
void updateImpl(const String & file_path);
|
||||||
void save() const;
|
|
||||||
void load(Map & local_map, const String & path) const;
|
void load(Map & local_map, const String & path) const;
|
||||||
|
|
||||||
DiskPtr disk;
|
DiskPtr disk;
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
#if defined(OS_LINUX)
|
||||||
|
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <fcntl.h>
|
#include <fcntl.h>
|
||||||
@ -101,3 +103,5 @@ MemoryStatisticsOS::Data MemoryStatisticsOS::get() const
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
@ -1,4 +1,5 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
#if defined(OS_LINUX)
|
||||||
#include <cstdint>
|
#include <cstdint>
|
||||||
|
|
||||||
|
|
||||||
@ -38,3 +39,5 @@ private:
|
|||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
@ -212,6 +212,21 @@
|
|||||||
M(NotCreatedLogEntryForMerge, "Log entry to merge parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \
|
M(NotCreatedLogEntryForMerge, "Log entry to merge parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \
|
||||||
M(CreatedLogEntryForMutation, "Successfully created log entry to mutate parts in ReplicatedMergeTree.") \
|
M(CreatedLogEntryForMutation, "Successfully created log entry to mutate parts in ReplicatedMergeTree.") \
|
||||||
M(NotCreatedLogEntryForMutation, "Log entry to mutate parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \
|
M(NotCreatedLogEntryForMutation, "Log entry to mutate parts in ReplicatedMergeTree is not created due to concurrent log update by another replica.") \
|
||||||
|
\
|
||||||
|
M(S3ReadMicroseconds, "Time of GET and HEAD requests to S3 storage.") \
|
||||||
|
M(S3ReadBytes, "Read bytes (incoming) in GET and HEAD requests to S3 storage.") \
|
||||||
|
M(S3ReadRequestsCount, "Number of GET and HEAD requests to S3 storage.") \
|
||||||
|
M(S3ReadRequestsErrors, "Number of non-throttling errors in GET and HEAD requests to S3 storage.") \
|
||||||
|
M(S3ReadRequestsThrottling, "Number of 429 and 503 errors in GET and HEAD requests to S3 storage.") \
|
||||||
|
M(S3ReadRequestsRedirects, "Number of redirects in GET and HEAD requests to S3 storage.") \
|
||||||
|
\
|
||||||
|
M(S3WriteMicroseconds, "Time of POST, DELETE, PUT and PATCH requests to S3 storage.") \
|
||||||
|
M(S3WriteBytes, "Write bytes (outgoing) in POST, DELETE, PUT and PATCH requests to S3 storage.") \
|
||||||
|
M(S3WriteRequestsCount, "Number of POST, DELETE, PUT and PATCH requests to S3 storage.") \
|
||||||
|
M(S3WriteRequestsErrors, "Number of non-throttling errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \
|
||||||
|
M(S3WriteRequestsThrottling, "Number of 429 and 503 errors in POST, DELETE, PUT and PATCH requests to S3 storage.") \
|
||||||
|
M(S3WriteRequestsRedirects, "Number of redirects in POST, DELETE, PUT and PATCH requests to S3 storage.") \
|
||||||
|
|
||||||
|
|
||||||
namespace ProfileEvents
|
namespace ProfileEvents
|
||||||
{
|
{
|
||||||
|
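The new S3 counters above are appended to the big M(name, "doc") list, which is an X-macro: the same entries are expanded several times to keep the event enum, names and descriptions in lockstep. A generic sketch of that idiom, using three of the events from this hunk and assuming the usual enum/name/doc expansion sites:

// Sketch of the X-macro idiom: one M(name, doc) list expanded into an enum and tables.
#include <cstdio>

#define APPLY_FOR_EVENTS(M) \
    M(S3ReadMicroseconds, "Time of GET and HEAD requests to S3 storage.") \
    M(S3ReadBytes, "Read bytes (incoming) in GET and HEAD requests to S3 storage.") \
    M(S3ReadRequestsCount, "Number of GET and HEAD requests to S3 storage.")

enum Event
{
#define M(NAME, DOC) NAME,
    APPLY_FOR_EVENTS(M)
#undef M
    END
};

static const char * event_names[] =
{
#define M(NAME, DOC) #NAME,
    APPLY_FOR_EVENTS(M)
#undef M
};

static const char * event_docs[] =
{
#define M(NAME, DOC) DOC,
    APPLY_FOR_EVENTS(M)
#undef M
};

int main()
{
    for (int i = 0; i < END; ++i)
        std::printf("%s: %s\n", event_names[i], event_docs[i]);
}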
@ -5,6 +5,7 @@
|
|||||||
|
|
||||||
#include <sstream>
|
#include <sstream>
|
||||||
#include <iomanip>
|
#include <iomanip>
|
||||||
|
#include <functional>
|
||||||
|
|
||||||
|
|
||||||
namespace Coordination
|
namespace Coordination
|
||||||
@ -25,11 +26,14 @@ static String baseName(const String & path)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
using Undo = std::function<void()>;
|
||||||
|
|
||||||
|
|
||||||
struct TestKeeperRequest : virtual Request
|
struct TestKeeperRequest : virtual Request
|
||||||
{
|
{
|
||||||
virtual bool isMutable() const { return false; }
|
virtual bool isMutable() const { return false; }
|
||||||
virtual ResponsePtr createResponse() const = 0;
|
virtual ResponsePtr createResponse() const = 0;
|
||||||
virtual ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const = 0;
|
virtual std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const = 0;
|
||||||
virtual void processWatches(TestKeeper::Watches & /*watches*/, TestKeeper::Watches & /*list_watches*/) const {}
|
virtual void processWatches(TestKeeper::Watches & /*watches*/, TestKeeper::Watches & /*list_watches*/) const {}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -69,7 +73,7 @@ struct TestKeeperCreateRequest final : CreateRequest, TestKeeperRequest
|
|||||||
TestKeeperCreateRequest() = default;
|
TestKeeperCreateRequest() = default;
|
||||||
explicit TestKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {}
|
explicit TestKeeperCreateRequest(const CreateRequest & base) : CreateRequest(base) {}
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
|
|
||||||
void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
|
void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
|
||||||
{
|
{
|
||||||
@ -83,7 +87,7 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest
|
|||||||
explicit TestKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}
|
explicit TestKeeperRemoveRequest(const RemoveRequest & base) : RemoveRequest(base) {}
|
||||||
bool isMutable() const override { return true; }
|
bool isMutable() const override { return true; }
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
|
|
||||||
void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
|
void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
|
||||||
{
|
{
|
||||||
@ -94,14 +98,14 @@ struct TestKeeperRemoveRequest final : RemoveRequest, TestKeeperRequest
|
|||||||
struct TestKeeperExistsRequest final : ExistsRequest, TestKeeperRequest
|
struct TestKeeperExistsRequest final : ExistsRequest, TestKeeperRequest
|
||||||
{
|
{
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct TestKeeperGetRequest final : GetRequest, TestKeeperRequest
|
struct TestKeeperGetRequest final : GetRequest, TestKeeperRequest
|
||||||
{
|
{
|
||||||
TestKeeperGetRequest() = default;
|
TestKeeperGetRequest() = default;
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest
|
struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest
|
||||||
@ -110,7 +114,7 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest
|
|||||||
explicit TestKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}
|
explicit TestKeeperSetRequest(const SetRequest & base) : SetRequest(base) {}
|
||||||
bool isMutable() const override { return true; }
|
bool isMutable() const override { return true; }
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
|
|
||||||
void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
|
void processWatches(TestKeeper::Watches & node_watches, TestKeeper::Watches & list_watches) const override
|
||||||
{
|
{
|
||||||
@ -121,7 +125,7 @@ struct TestKeeperSetRequest final : SetRequest, TestKeeperRequest
|
|||||||
struct TestKeeperListRequest final : ListRequest, TestKeeperRequest
|
struct TestKeeperListRequest final : ListRequest, TestKeeperRequest
|
||||||
{
|
{
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct TestKeeperCheckRequest final : CheckRequest, TestKeeperRequest
|
struct TestKeeperCheckRequest final : CheckRequest, TestKeeperRequest
|
||||||
@ -129,7 +133,7 @@ struct TestKeeperCheckRequest final : CheckRequest, TestKeeperRequest
|
|||||||
TestKeeperCheckRequest() = default;
|
TestKeeperCheckRequest() = default;
|
||||||
explicit TestKeeperCheckRequest(const CheckRequest & base) : CheckRequest(base) {}
|
explicit TestKeeperCheckRequest(const CheckRequest & base) : CheckRequest(base) {}
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
|
struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
|
||||||
@ -169,13 +173,15 @@ struct TestKeeperMultiRequest final : MultiRequest, TestKeeperRequest
|
|||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr createResponse() const override;
|
ResponsePtr createResponse() const override;
|
||||||
ResponsePtr process(TestKeeper::Container & container, int64_t zxid) const override;
|
std::pair<ResponsePtr, Undo> process(TestKeeper::Container & container, int64_t zxid) const override;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
ResponsePtr TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const
|
std::pair<ResponsePtr, Undo> TestKeeperCreateRequest::process(TestKeeper::Container & container, int64_t zxid) const
|
||||||
{
|
{
|
||||||
CreateResponse response;
|
CreateResponse response;
|
||||||
|
Undo undo;
|
||||||
|
|
||||||
if (container.count(path))
|
if (container.count(path))
|
||||||
{
|
{
|
||||||
response.error = Error::ZNODEEXISTS;
|
response.error = Error::ZNODEEXISTS;
|
||||||
@ -219,7 +225,18 @@ ResponsePtr TestKeeperCreateRequest::process(TestKeeper::Container & container,
|
|||||||
}
|
}
|
||||||
|
|
||||||
response.path_created = path_created;
|
response.path_created = path_created;
|
||||||
container.emplace(std::move(path_created), std::move(created_node));
|
container.emplace(path_created, std::move(created_node));
|
||||||
|
|
||||||
|
undo = [&container, path_created, is_sequential = is_sequential, parent_path = it->first]
|
||||||
|
{
|
||||||
|
container.erase(path_created);
|
||||||
|
auto & undo_parent = container.at(parent_path);
|
||||||
|
--undo_parent.stat.cversion;
|
||||||
|
--undo_parent.stat.numChildren;
|
||||||
|
|
||||||
|
if (is_sequential)
|
||||||
|
--undo_parent.seq_num;
|
||||||
|
};
|
||||||
|
|
||||||
++it->second.stat.cversion;
|
++it->second.stat.cversion;
|
||||||
++it->second.stat.numChildren;
|
++it->second.stat.numChildren;
|
||||||
@ -228,12 +245,13 @@ ResponsePtr TestKeeperCreateRequest::process(TestKeeper::Container & container,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<CreateResponse>(response);
|
return { std::make_shared<CreateResponse>(response), undo };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t) const
|
std::pair<ResponsePtr, Undo> TestKeeperRemoveRequest::process(TestKeeper::Container & container, int64_t) const
|
||||||
{
|
{
|
||||||
RemoveResponse response;
|
RemoveResponse response;
|
||||||
|
Undo undo;
|
||||||
|
|
||||||
auto it = container.find(path);
|
auto it = container.find(path);
|
||||||
if (it == container.end())
|
if (it == container.end())
|
||||||
@ -250,17 +268,26 @@ ResponsePtr TestKeeperRemoveRequest::process(TestKeeper::Container & container,
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
|
auto prev_node = it->second;
|
||||||
container.erase(it);
|
container.erase(it);
|
||||||
auto & parent = container.at(parentPath(path));
|
auto & parent = container.at(parentPath(path));
|
||||||
--parent.stat.numChildren;
|
--parent.stat.numChildren;
|
||||||
++parent.stat.cversion;
|
++parent.stat.cversion;
|
||||||
response.error = Error::ZOK;
|
response.error = Error::ZOK;
|
||||||
|
|
||||||
|
undo = [prev_node, &container, path = path]
|
||||||
|
{
|
||||||
|
container.emplace(path, prev_node);
|
||||||
|
auto & undo_parent = container.at(parentPath(path));
|
||||||
|
++undo_parent.stat.numChildren;
|
||||||
|
--undo_parent.stat.cversion;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<RemoveResponse>(response);
|
return { std::make_shared<RemoveResponse>(response), undo };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t) const
|
std::pair<ResponsePtr, Undo> TestKeeperExistsRequest::process(TestKeeper::Container & container, int64_t) const
|
||||||
{
|
{
|
||||||
ExistsResponse response;
|
ExistsResponse response;
|
||||||
|
|
||||||
@ -275,10 +302,10 @@ ResponsePtr TestKeeperExistsRequest::process(TestKeeper::Container & container,
|
|||||||
response.error = Error::ZNONODE;
|
response.error = Error::ZNONODE;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<ExistsResponse>(response);
|
return { std::make_shared<ExistsResponse>(response), {} };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t) const
|
std::pair<ResponsePtr, Undo> TestKeeperGetRequest::process(TestKeeper::Container & container, int64_t) const
|
||||||
{
|
{
|
||||||
GetResponse response;
|
GetResponse response;
|
||||||
|
|
||||||
@ -294,12 +321,13 @@ ResponsePtr TestKeeperGetRequest::process(TestKeeper::Container & container, int
|
|||||||
response.error = Error::ZOK;
|
response.error = Error::ZOK;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<GetResponse>(response);
|
return { std::make_shared<GetResponse>(response), {} };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperSetRequest::process(TestKeeper::Container & container, int64_t zxid) const
|
std::pair<ResponsePtr, Undo> TestKeeperSetRequest::process(TestKeeper::Container & container, int64_t zxid) const
|
||||||
{
|
{
|
||||||
SetResponse response;
|
SetResponse response;
|
||||||
|
Undo undo;
|
||||||
|
|
||||||
auto it = container.find(path);
|
auto it = container.find(path);
|
||||||
if (it == container.end())
|
if (it == container.end())
|
||||||
@ -308,6 +336,8 @@ ResponsePtr TestKeeperSetRequest::process(TestKeeper::Container & container, int
|
|||||||
}
|
}
|
||||||
else if (version == -1 || version == it->second.stat.version)
|
else if (version == -1 || version == it->second.stat.version)
|
||||||
{
|
{
|
||||||
|
auto prev_node = it->second;
|
||||||
|
|
||||||
it->second.data = data;
|
it->second.data = data;
|
||||||
++it->second.stat.version;
|
++it->second.stat.version;
|
||||||
it->second.stat.mzxid = zxid;
|
it->second.stat.mzxid = zxid;
|
||||||
@ -316,16 +346,22 @@ ResponsePtr TestKeeperSetRequest::process(TestKeeper::Container & container, int
|
|||||||
++container.at(parentPath(path)).stat.cversion;
|
++container.at(parentPath(path)).stat.cversion;
|
||||||
response.stat = it->second.stat;
|
response.stat = it->second.stat;
|
||||||
response.error = Error::ZOK;
|
response.error = Error::ZOK;
|
||||||
|
|
||||||
|
undo = [prev_node, &container, path = path]
|
||||||
|
{
|
||||||
|
container.at(path) = prev_node;
|
||||||
|
--container.at(parentPath(path)).stat.cversion;
|
||||||
|
};
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
response.error = Error::ZBADVERSION;
|
response.error = Error::ZBADVERSION;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<SetResponse>(response);
|
return { std::make_shared<SetResponse>(response), undo };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperListRequest::process(TestKeeper::Container & container, int64_t) const
|
std::pair<ResponsePtr, Undo> TestKeeperListRequest::process(TestKeeper::Container & container, int64_t) const
|
||||||
{
|
{
|
||||||
ListResponse response;
|
ListResponse response;
|
||||||
|
|
||||||
@ -344,18 +380,22 @@ ResponsePtr TestKeeperListRequest::process(TestKeeper::Container & container, in
|
|||||||
path_prefix += '/';
|
path_prefix += '/';
|
||||||
|
|
||||||
/// Fairly inefficient.
|
/// Fairly inefficient.
|
||||||
for (auto child_it = container.upper_bound(path_prefix); child_it != container.end() && startsWith(child_it->first, path_prefix); ++child_it)
|
for (auto child_it = container.upper_bound(path_prefix);
|
||||||
|
child_it != container.end() && startsWith(child_it->first, path_prefix);
|
||||||
|
++child_it)
|
||||||
|
{
|
||||||
if (parentPath(child_it->first) == path)
|
if (parentPath(child_it->first) == path)
|
||||||
response.names.emplace_back(baseName(child_it->first));
|
response.names.emplace_back(baseName(child_it->first));
|
||||||
|
}
|
||||||
|
|
||||||
response.stat = it->second.stat;
|
response.stat = it->second.stat;
|
||||||
response.error = Error::ZOK;
|
response.error = Error::ZOK;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<ListResponse>(response);
|
return { std::make_shared<ListResponse>(response), {} };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t) const
|
std::pair<ResponsePtr, Undo> TestKeeperCheckRequest::process(TestKeeper::Container & container, int64_t) const
|
||||||
{
|
{
|
||||||
CheckResponse response;
|
CheckResponse response;
|
||||||
auto it = container.find(path);
|
auto it = container.find(path);
|
||||||
@ -372,38 +412,44 @@ ResponsePtr TestKeeperCheckRequest::process(TestKeeper::Container & container, i
|
|||||||
response.error = Error::ZOK;
|
response.error = Error::ZOK;
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_shared<CheckResponse>(response);
|
return { std::make_shared<CheckResponse>(response), {} };
|
||||||
}
|
}
|
||||||
|
|
||||||
ResponsePtr TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const
|
std::pair<ResponsePtr, Undo> TestKeeperMultiRequest::process(TestKeeper::Container & container, int64_t zxid) const
|
||||||
{
|
{
|
||||||
MultiResponse response;
|
MultiResponse response;
|
||||||
response.responses.reserve(requests.size());
|
response.responses.reserve(requests.size());
|
||||||
|
std::vector<Undo> undo_actions;
|
||||||
/// Fairly inefficient.
|
|
||||||
auto container_copy = container;
|
|
||||||
|
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
for (const auto & request : requests)
|
for (const auto & request : requests)
|
||||||
{
|
{
|
||||||
const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(*request);
|
const TestKeeperRequest & concrete_request = dynamic_cast<const TestKeeperRequest &>(*request);
|
||||||
auto cur_response = concrete_request.process(container, zxid);
|
auto [ cur_response, undo_action ] = concrete_request.process(container, zxid);
|
||||||
response.responses.emplace_back(cur_response);
|
response.responses.emplace_back(cur_response);
|
||||||
if (cur_response->error != Error::ZOK)
|
if (cur_response->error != Error::ZOK)
|
||||||
{
|
{
|
||||||
response.error = cur_response->error;
|
response.error = cur_response->error;
|
||||||
container = container_copy;
|
|
||||||
return std::make_shared<MultiResponse>(response);
|
for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
|
||||||
|
if (*it)
|
||||||
|
(*it)();
|
||||||
|
|
||||||
|
return { std::make_shared<MultiResponse>(response), {} };
|
||||||
}
|
}
|
||||||
|
else
|
||||||
|
undo_actions.emplace_back(std::move(undo_action));
|
||||||
}
|
}
|
||||||
|
|
||||||
response.error = Error::ZOK;
|
response.error = Error::ZOK;
|
||||||
return std::make_shared<MultiResponse>(response);
|
return { std::make_shared<MultiResponse>(response), {} };
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
container = container_copy;
|
for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
|
||||||
|
if (*it)
|
||||||
|
(*it)();
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
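The multi-request above no longer copies the whole container up front; each sub-request returns an Undo closure, and on the first error (or an exception) the collected closures are replayed in reverse order. A standalone sketch of that rollback pattern over a plain std::map standing in for the TestKeeper container:

// Sketch of the undo-list pattern: each step returns a closure that reverts it,
// and on failure the closures run in reverse order.
#include <cassert>
#include <functional>
#include <map>
#include <string>
#include <vector>

using Undo = std::function<void()>;

int main()
{
    std::map<std::string, int> container{{"/a", 1}};
    std::vector<Undo> undo_actions;

    auto create = [&](const std::string & path, int value) -> bool
    {
        if (container.count(path))
            return false;                       /// Analogue of ZNODEEXISTS.
        container.emplace(path, value);
        undo_actions.emplace_back([&container, path] { container.erase(path); });
        return true;
    };

    /// The second create fails, so everything done so far is rolled back in reverse order.
    bool ok = create("/b", 2) && create("/a", 3);
    if (!ok)
        for (auto it = undo_actions.rbegin(); it != undo_actions.rend(); ++it)
            if (*it)
                (*it)();

    assert(container.size() == 1 && container.count("/a"));
    return 0;
}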
@ -476,7 +522,7 @@ void TestKeeper::processingThread()
|
|||||||
++zxid;
|
++zxid;
|
||||||
|
|
||||||
info.request->addRootPath(root_path);
|
info.request->addRootPath(root_path);
|
||||||
ResponsePtr response = info.request->process(container, zxid);
|
auto [response, _] = info.request->process(container, zxid);
|
||||||
if (response->error == Error::ZOK)
|
if (response->error == Error::ZOK)
|
||||||
info.request->processWatches(watches, list_watches);
|
info.request->processWatches(watches, list_watches);
|
||||||
|
|
||||||
|
@ -372,6 +372,7 @@ struct Settings : public SettingsCollection<Settings>
|
|||||||
M(SettingBool, optimize_duplicate_order_by_and_distinct, true, "Remove duplicate ORDER BY and DISTINCT if it's possible", 0) \
|
M(SettingBool, optimize_duplicate_order_by_and_distinct, true, "Remove duplicate ORDER BY and DISTINCT if it's possible", 0) \
|
||||||
M(SettingBool, optimize_redundant_functions_in_order_by, true, "Remove functions from ORDER BY if its argument is also in ORDER BY", 0) \
|
M(SettingBool, optimize_redundant_functions_in_order_by, true, "Remove functions from ORDER BY if its argument is also in ORDER BY", 0) \
|
||||||
M(SettingBool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
|
M(SettingBool, optimize_if_chain_to_multiif, false, "Replace if(cond1, then1, if(cond2, ...)) chains to multiIf. Currently it's not beneficial for numeric types.", 0) \
|
||||||
|
M(SettingBool, optimize_monotonous_functions_in_order_by, true, "Replace monotonous function with its argument in ORDER BY", 0) \
|
||||||
M(SettingBool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \
|
M(SettingBool, allow_experimental_alter_materialized_view_structure, false, "Allow atomic alter on Materialized views. Work in progress.", 0) \
|
||||||
M(SettingBool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
|
M(SettingBool, enable_early_constant_folding, true, "Enable query optimization where we analyze function and subqueries results and rewrite query if there're constants there", 0) \
|
||||||
\
|
\
|
||||||
|
@ -10,3 +10,4 @@
|
|||||||
#cmakedefine01 USE_INTERNAL_LLVM_LIBRARY
|
#cmakedefine01 USE_INTERNAL_LLVM_LIBRARY
|
||||||
#cmakedefine01 USE_SSL
|
#cmakedefine01 USE_SSL
|
||||||
#cmakedefine01 USE_OPENCL
|
#cmakedefine01 USE_OPENCL
|
||||||
|
#cmakedefine01 USE_LDAP
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include "SSDCacheDictionary.h"
|
#include "SSDCacheDictionary.h"
|
||||||
|
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include "SSDComplexKeyCacheDictionary.h"
|
#include "SSDComplexKeyCacheDictionary.h"
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include "DictionaryStructure.h"
|
#include "DictionaryStructure.h"
|
||||||
#include "IDictionary.h"
|
#include "IDictionary.h"
|
||||||
|
@ -33,7 +33,7 @@ void registerDictionaries()
|
|||||||
registerDictionaryFlat(factory);
|
registerDictionaryFlat(factory);
|
||||||
registerDictionaryHashed(factory);
|
registerDictionaryHashed(factory);
|
||||||
registerDictionaryCache(factory);
|
registerDictionaryCache(factory);
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
registerDictionarySSDCache(factory);
|
registerDictionarySSDCache(factory);
|
||||||
registerDictionarySSDComplexKeyCache(factory);
|
registerDictionarySSDComplexKeyCache(factory);
|
||||||
#endif
|
#endif
|
||||||
|
@ -19,6 +19,7 @@ namespace ErrorCodes
|
|||||||
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
|
extern const int EXCESSIVE_ELEMENT_IN_CONFIG;
|
||||||
extern const int PATH_ACCESS_DENIED;
|
extern const int PATH_ACCESS_DENIED;
|
||||||
extern const int INCORRECT_DISK_INDEX;
|
extern const int INCORRECT_DISK_INDEX;
|
||||||
|
extern const int CANNOT_TRUNCATE_FILE;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::mutex DiskLocal::reservation_mutex;
|
std::mutex DiskLocal::reservation_mutex;
|
||||||
@ -261,6 +262,13 @@ void DiskLocal::createHardLink(const String & src_path, const String & dst_path)
|
|||||||
DB::createHardLink(disk_path + src_path, disk_path + dst_path);
|
DB::createHardLink(disk_path + src_path, disk_path + dst_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void DiskLocal::truncateFile(const String & path, size_t size)
|
||||||
|
{
|
||||||
|
int res = truncate((disk_path + path).c_str(), size);
|
||||||
|
if (-1 == res)
|
||||||
|
throwFromErrnoWithPath("Cannot truncate file " + path, path, ErrorCodes::CANNOT_TRUNCATE_FILE);
|
||||||
|
}
|
||||||
|
|
||||||
void DiskLocal::createFile(const String & path)
|
void DiskLocal::createFile(const String & path)
|
||||||
{
|
{
|
||||||
Poco::File(disk_path + path).createFile();
|
Poco::File(disk_path + path).createFile();
|
||||||
|
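DiskLocal::truncateFile above is a thin wrapper over POSIX truncate() that turns a failure into an exception carrying the errno description. A minimal sketch of the same call, with plain strerror reporting instead of throwFromErrnoWithPath:

// Minimal sketch of the DiskLocal::truncateFile pattern: call POSIX truncate()
// and convert a failure into an exception with strerror(errno).
#include <unistd.h>
#include <cerrno>
#include <cstddef>
#include <cstring>
#include <stdexcept>
#include <string>

static void truncateFile(const std::string & path, size_t size)
{
    if (0 != ::truncate(path.c_str(), static_cast<off_t>(size)))
        throw std::runtime_error("Cannot truncate file " + path + ": " + std::strerror(errno));
}

int main()
{
    try
    {
        truncateFile("/nonexistent/file", 0);   /// Expected to fail and throw.
    }
    catch (const std::runtime_error &)
    {
        return 0;
    }
    return 1;
}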
@ -99,6 +99,8 @@ public:
|
|||||||
|
|
||||||
void createHardLink(const String & src_path, const String & dst_path) override;
|
void createHardLink(const String & src_path, const String & dst_path) override;
|
||||||
|
|
||||||
|
void truncateFile(const String & path, size_t size) override;
|
||||||
|
|
||||||
const String getType() const override { return "local"; }
|
const String getType() const override { return "local"; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -408,6 +408,17 @@ void DiskMemory::setReadOnly(const String &)
|
|||||||
throw Exception("Method setReadOnly is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
|
throw Exception("Method setReadOnly is not implemented for memory disks", ErrorCodes::NOT_IMPLEMENTED);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void DiskMemory::truncateFile(const String & path, size_t size)
|
||||||
|
{
|
||||||
|
std::lock_guard lock(mutex);
|
||||||
|
|
||||||
|
auto file_it = files.find(path);
|
||||||
|
if (file_it == files.end())
|
||||||
|
throw Exception("File '" + path + "' doesn't exist", ErrorCodes::FILE_DOESNT_EXIST);
|
||||||
|
|
||||||
|
file_it->second.data.resize(size);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
using DiskMemoryPtr = std::shared_ptr<DiskMemory>;
|
using DiskMemoryPtr = std::shared_ptr<DiskMemory>;
|
||||||
|
|
||||||
|
@ -90,6 +90,8 @@ public:
|
|||||||
|
|
||||||
void createHardLink(const String & src_path, const String & dst_path) override;
|
void createHardLink(const String & src_path, const String & dst_path) override;
|
||||||
|
|
||||||
|
void truncateFile(const String & path, size_t size) override;
|
||||||
|
|
||||||
const String getType() const override { return "memory"; }
|
const String getType() const override { return "memory"; }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -8,6 +8,11 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int NOT_IMPLEMENTED;
|
||||||
|
}
|
||||||
|
|
||||||
bool IDisk::isDirectoryEmpty(const String & path)
|
bool IDisk::isDirectoryEmpty(const String & path)
|
||||||
{
|
{
|
||||||
return !iterateDirectory(path)->isValid();
|
return !iterateDirectory(path)->isValid();
|
||||||
@ -42,4 +47,9 @@ void IDisk::copy(const String & from_path, const std::shared_ptr<IDisk> & to_dis
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void IDisk::truncateFile(const String &, size_t)
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Truncate operation is not implemented for disk of type {}", getType());
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
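IDisk gets truncateFile as a virtual with a throwing default body rather than a pure virtual, so only disks that can actually truncate need to override it. A sketch of that pattern, using std::logic_error in place of the NOT_IMPLEMENTED error code:

// Sketch of the "virtual with a throwing default" pattern used for optional disk operations.
#include <cstddef>
#include <stdexcept>
#include <string>

class IDisk
{
public:
    virtual ~IDisk() = default;
    virtual std::string getType() const = 0;

    /// Optional capability: disks that cannot truncate simply inherit this body.
    virtual void truncateFile(const std::string & /*path*/, size_t /*size*/)
    {
        throw std::logic_error("Truncate operation is not implemented for disk of type " + getType());
    }
};

class ReadOnlyDisk : public IDisk
{
public:
    std::string getType() const override { return "read_only"; }
};

int main()
{
    ReadOnlyDisk disk;
    try
    {
        disk.truncateFile("file.bin", 0);
    }
    catch (const std::logic_error &)
    {
        return 0;   /// Default implementation throws as expected.
    }
    return 1;
}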
@ -172,6 +172,9 @@ public:
|
|||||||
/// Create hardlink from `src_path` to `dst_path`.
|
/// Create hardlink from `src_path` to `dst_path`.
|
||||||
virtual void createHardLink(const String & src_path, const String & dst_path) = 0;
|
virtual void createHardLink(const String & src_path, const String & dst_path) = 0;
|
||||||
|
|
||||||
|
/// Truncate file to specified size.
|
||||||
|
virtual void truncateFile(const String & path, size_t size);
|
||||||
|
|
||||||
/// Return disk type - "local", "s3", etc.
|
/// Return disk type - "local", "s3", etc.
|
||||||
virtual const String getType() const = 0;
|
virtual const String getType() const = 0;
|
||||||
};
|
};
|
||||||
|
@ -9,3 +9,5 @@
|
|||||||
#cmakedefine01 USE_ORC
|
#cmakedefine01 USE_ORC
|
||||||
#cmakedefine01 USE_ARROW
|
#cmakedefine01 USE_ARROW
|
||||||
#cmakedefine01 USE_PROTOBUF
|
#cmakedefine01 USE_PROTOBUF
|
||||||
|
#cmakedefine01 USE_MSGPACK
|
||||||
|
|
||||||
|
@ -29,7 +29,7 @@
|
|||||||
#include <Dictionaries/FlatDictionary.h>
|
#include <Dictionaries/FlatDictionary.h>
|
||||||
#include <Dictionaries/HashedDictionary.h>
|
#include <Dictionaries/HashedDictionary.h>
|
||||||
#include <Dictionaries/CacheDictionary.h>
|
#include <Dictionaries/CacheDictionary.h>
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
#include <Dictionaries/SSDCacheDictionary.h>
|
#include <Dictionaries/SSDCacheDictionary.h>
|
||||||
#include <Dictionaries/SSDComplexKeyCacheDictionary.h>
|
#include <Dictionaries/SSDComplexKeyCacheDictionary.h>
|
||||||
#endif
|
#endif
|
||||||
@ -183,13 +183,13 @@ private:
|
|||||||
!executeDispatchSimple<DirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatchSimple<DirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchSimple<HashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatchSimple<HashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchSimple<CacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchSimple<CacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatchSimple<SSDCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchSimple<SSDCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
@ -339,13 +339,13 @@ private:
|
|||||||
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
@ -523,13 +523,13 @@ private:
|
|||||||
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
@ -863,13 +863,13 @@ private:
|
|||||||
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
@ -1124,13 +1124,13 @@ private:
|
|||||||
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<HashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<DirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<CacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatch<SSDCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyHashedDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyDirectDictionary>(block, arguments, result, dict) &&
|
||||||
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<ComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
!executeDispatchComplex<SSDComplexKeyCacheDictionary>(block, arguments, result, dict) &&
|
||||||
#endif
|
#endif
|
||||||
#if !defined(ARCADIA_BUILD)
|
#if !defined(ARCADIA_BUILD)
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
#include <common/logger_useful.h>
|
#include <common/logger_useful.h>
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include <condition_variable>
|
#include <condition_variable>
|
||||||
#include <future>
|
#include <future>
|
||||||
|
@ -7,7 +7,7 @@
|
|||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
|
|
||||||
#if defined(__OpenBSD__) || defined(__FreeBSD__)
|
#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined (__ANDROID__)
|
||||||
# include <sys/endian.h>
|
# include <sys/endian.h>
|
||||||
#elif defined(__APPLE__)
|
#elif defined(__APPLE__)
|
||||||
# include <libkern/OSByteOrder.h>
|
# include <libkern/OSByteOrder.h>
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include <IO/ReadBufferAIO.h>
|
#include <IO/ReadBufferAIO.h>
|
||||||
#include <IO/AIOContextPool.h>
|
#include <IO/AIOContextPool.h>
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#if defined(__linux__) || defined(__FreeBSD__)
|
#if defined(OS_LINUX) || defined(__FreeBSD__)
|
||||||
|
|
||||||
#include <IO/ReadBufferFromFileBase.h>
|
#include <IO/ReadBufferFromFileBase.h>
|
||||||
#include <IO/ReadBuffer.h>
|
#include <IO/ReadBuffer.h>
|
||||||
|
@@ -4,6 +4,7 @@
 # include <IO/ReadBufferFromIStream.h>
 # include <IO/ReadBufferFromS3.h>
+# include <Common/Stopwatch.h>
 
 # include <aws/s3/S3Client.h>
 # include <aws/s3/model/GetObjectRequest.h>
@@ -11,6 +12,12 @@
 # include <utility>
 
+namespace ProfileEvents
+{
+    extern const Event S3ReadMicroseconds;
+    extern const Event S3ReadBytes;
+}
+
 namespace DB
 {
 namespace ErrorCodes
@@ -27,6 +34,7 @@ ReadBufferFromS3::ReadBufferFromS3(
 {
 }
 
+
 bool ReadBufferFromS3::nextImpl()
 {
     if (!initialized)
@@ -35,9 +43,17 @@ bool ReadBufferFromS3::nextImpl()
         initialized = true;
     }
 
-    if (!impl->next())
+    Stopwatch watch;
+    auto res = impl->next();
+    watch.stop();
+    ProfileEvents::increment(ProfileEvents::S3ReadMicroseconds, watch.elapsedMicroseconds());
+
+    if (!res)
         return false;
     internal_buffer = impl->buffer();
+
+    ProfileEvents::increment(ProfileEvents::S3ReadBytes, internal_buffer.size());
+
     working_buffer = internal_buffer;
     return true;
 }
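
The ReadBufferFromS3 hunks above wrap each read step in a stopwatch and bump two counters: elapsed microseconds and bytes read. A minimal standalone sketch of that pattern, using std::chrono and plain atomics instead of ClickHouse's Stopwatch and ProfileEvents facilities (all names below are illustrative, not the project's API):

#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the S3ReadMicroseconds / S3ReadBytes counters.
static std::atomic<uint64_t> s3_read_microseconds{0};
static std::atomic<uint64_t> s3_read_bytes{0};

template <typename ReadStep>
bool timedReadStep(ReadStep && step)
{
    auto start = std::chrono::steady_clock::now();
    size_t bytes = step();                     // stands in for the underlying buffer refill
    auto elapsed = std::chrono::steady_clock::now() - start;
    s3_read_microseconds += std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
    if (bytes == 0)
        return false;                          // nothing more to read
    s3_read_bytes += bytes;                    // only count bytes when the step produced data
    return true;
}

int main()
{
    timedReadStep([] { return size_t{1024}; }); // pretend one 1 KiB chunk was read
    std::printf("read: %llu us, %llu bytes\n",
                static_cast<unsigned long long>(s3_read_microseconds),
                static_cast<unsigned long long>(s3_read_bytes));
    return 0;
}
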
@@ -4,6 +4,7 @@
 #include <IO/HTTPCommon.h>
 #include <IO/S3/PocoHTTPResponseStream.h>
 #include <IO/S3/PocoHTTPResponseStream.cpp>
+#include <Common/Stopwatch.h>
 #include <aws/core/http/HttpRequest.h>
 #include <aws/core/http/HttpResponse.h>
 #include <aws/core/http/standard/StandardHttpResponse.h>
@@ -14,8 +15,24 @@
 #include <Poco/Net/HTTPResponse.h>
 #include <common/logger_useful.h>
 
+namespace ProfileEvents
+{
+    extern const Event S3ReadMicroseconds;
+    extern const Event S3ReadRequestsCount;
+    extern const Event S3ReadRequestsErrors;
+    extern const Event S3ReadRequestsThrottling;
+    extern const Event S3ReadRequestsRedirects;
+
+    extern const Event S3WriteMicroseconds;
+    extern const Event S3WriteRequestsCount;
+    extern const Event S3WriteRequestsErrors;
+    extern const Event S3WriteRequestsThrottling;
+    extern const Event S3WriteRequestsRedirects;
+}
+
 namespace DB::ErrorCodes
 {
+    extern const int NOT_IMPLEMENTED;
     extern const int TOO_MANY_REDIRECTS;
 }
 
@@ -62,6 +79,46 @@ void PocoHTTPClient::MakeRequestInternal(
     auto uri = request.GetUri().GetURIString();
     LOG_DEBUG(log, "Make request to: {}", uri);
 
+    enum class S3MetricType
+    {
+        Microseconds,
+        Count,
+        Errors,
+        Throttling,
+        Redirects,
+
+        EnumSize,
+    };
+
+    auto selectMetric = [&request](S3MetricType type)
+    {
+        const ProfileEvents::Event events_map[][2] = {
+            {ProfileEvents::S3ReadMicroseconds, ProfileEvents::S3WriteMicroseconds},
+            {ProfileEvents::S3ReadRequestsCount, ProfileEvents::S3WriteRequestsCount},
+            {ProfileEvents::S3ReadRequestsErrors, ProfileEvents::S3WriteRequestsErrors},
+            {ProfileEvents::S3ReadRequestsThrottling, ProfileEvents::S3WriteRequestsThrottling},
+            {ProfileEvents::S3ReadRequestsRedirects, ProfileEvents::S3WriteRequestsRedirects},
+        };
+
+        static_assert((sizeof(events_map) / sizeof(events_map[0])) == static_cast<unsigned int>(S3MetricType::EnumSize));
+
+        switch (request.GetMethod())
+        {
+            case Aws::Http::HttpMethod::HTTP_GET:
+            case Aws::Http::HttpMethod::HTTP_HEAD:
+                return events_map[static_cast<unsigned int>(type)][0]; // Read
+            case Aws::Http::HttpMethod::HTTP_POST:
+            case Aws::Http::HttpMethod::HTTP_DELETE:
+            case Aws::Http::HttpMethod::HTTP_PUT:
+            case Aws::Http::HttpMethod::HTTP_PATCH:
+                return events_map[static_cast<unsigned int>(type)][1]; // Write
+        }
+
+        throw Exception("Unsupported request method", ErrorCodes::NOT_IMPLEMENTED);
+    };
+
+    ProfileEvents::increment(selectMetric(S3MetricType::Count));
+
     const int MAX_REDIRECT_ATTEMPTS = 10;
     try
     {
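
The PocoHTTPClient hunk above classifies each request as a read (GET/HEAD) or a write (POST/DELETE/PUT/PATCH) and indexes a small metric-by-direction table. A standalone sketch of the same idea with plain enums and counters (the enum and counter names here are illustrative, not ClickHouse's):

#include <cstdio>

enum class HttpMethod { Get, Head, Post, Delete, Put, Patch };
enum class S3Metric { Microseconds, Count, Errors, Throttling, Redirects, EnumSize };

// counters[metric][0] is the read-side counter, counters[metric][1] the write-side one.
static unsigned long counters[static_cast<int>(S3Metric::EnumSize)][2] = {};

static void increment(S3Metric metric, HttpMethod method, unsigned long value = 1)
{
    // GET and HEAD are reads; every other method counts as a write.
    int direction = (method == HttpMethod::Get || method == HttpMethod::Head) ? 0 : 1;
    counters[static_cast<int>(metric)][direction] += value;
}

int main()
{
    increment(S3Metric::Count, HttpMethod::Get);
    increment(S3Metric::Count, HttpMethod::Put);
    increment(S3Metric::Microseconds, HttpMethod::Put, 1500);
    std::printf("read requests: %lu, write requests: %lu, write us: %lu\n",
                counters[static_cast<int>(S3Metric::Count)][0],
                counters[static_cast<int>(S3Metric::Count)][1],
                counters[static_cast<int>(S3Metric::Microseconds)][1]);
    return 0;
}
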
@@ -112,11 +169,15 @@ void PocoHTTPClient::MakeRequestInternal(
                 poco_request.set(header_name, header_value);
 
             Poco::Net::HTTPResponse poco_response;
+
+            Stopwatch watch;
+
             auto & request_body_stream = session->sendRequest(poco_request);
+
             if (request.GetContentBody())
             {
                 LOG_TRACE(log, "Writing request body.");
 
                 if (attempt > 0) /// rewind content body buffer.
                 {
                     request.GetContentBody()->clear();
@@ -129,6 +190,9 @@ void PocoHTTPClient::MakeRequestInternal(
             LOG_TRACE(log, "Receiving response...");
             auto & response_body_stream = session->receiveResponse(poco_response);
 
+            watch.stop();
+            ProfileEvents::increment(selectMetric(S3MetricType::Microseconds), watch.elapsedMicroseconds());
+
             int status_code = static_cast<int>(poco_response.getStatus());
             LOG_DEBUG(log, "Response status: {}, {}", status_code, poco_response.getReason());
 
@@ -138,6 +202,8 @@ void PocoHTTPClient::MakeRequestInternal(
                 uri = location;
                 LOG_DEBUG(log, "Redirecting request to new location: {}", location);
 
+                ProfileEvents::increment(selectMetric(S3MetricType::Redirects));
+
                 continue;
             }
 
@@ -159,6 +225,15 @@ void PocoHTTPClient::MakeRequestInternal(
 
                 response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION);
                 response->SetClientErrorMessage(error_message);
+
+                if (status_code == 429 || status_code == 503)
+                { // API throttling
+                    ProfileEvents::increment(selectMetric(S3MetricType::Throttling));
+                }
+                else
+                {
+                    ProfileEvents::increment(selectMetric(S3MetricType::Errors));
+                }
             }
             else
                 response->GetResponseStream().SetUnderlyingStream(std::make_shared<PocoHTTPResponseStream>(session, response_body_stream));
@@ -173,6 +248,8 @@ void PocoHTTPClient::MakeRequestInternal(
         tryLogCurrentException(log, fmt::format("Failed to make request to: {}", uri));
         response->SetClientErrorType(Aws::Client::CoreErrors::NETWORK_CONNECTION);
         response->SetClientErrorMessage(getCurrentExceptionMessage(false));
+
+        ProfileEvents::increment(selectMetric(S3MetricType::Errors));
     }
 }
 }
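
The error-handling hunk above treats HTTP 429 and 503 responses as API throttling and every other failed request as a generic error. A minimal sketch of that classification (the counter names are illustrative):

#include <cstdio>

static unsigned long throttling_count = 0;
static unsigned long error_count = 0;

static void recordFailedRequest(int status_code)
{
    if (status_code == 429 || status_code == 503)
        ++throttling_count;   // Too Many Requests / Service Unavailable -> throttling
    else
        ++error_count;        // any other failure -> generic request error
}

int main()
{
    recordFailedRequest(503);
    recordFailedRequest(500);
    std::printf("throttled: %lu, errors: %lu\n", throttling_count, error_count);
    return 0;
}
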
@@ -22,6 +22,12 @@
 
 namespace
 {
+
+const char * S3_LOGGER_TAG_NAMES[][2] = {
+    {"AWSClient", "AWSClient"},
+    {"AWSAuthV4Signer", "AWSClient (AWSAuthV4Signer)"},
+};
+
 const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::Utils::Logging::LogLevel log_level)
 {
     static const std::unordered_map<Aws::Utils::Logging::LogLevel, std::pair<DB::LogsLevel, Poco::Message::Priority>> mapping =
@@ -40,26 +46,46 @@ const std::pair<DB::LogsLevel, Poco::Message::Priority> & convertLogLevel(Aws::U
 class AWSLogger final : public Aws::Utils::Logging::LogSystemInterface
 {
 public:
+    AWSLogger()
+    {
+        for (auto [tag, name] : S3_LOGGER_TAG_NAMES)
+            tag_loggers[tag] = &Poco::Logger::get(name);
+
+        default_logger = tag_loggers[S3_LOGGER_TAG_NAMES[0][0]];
+    }
+
     ~AWSLogger() final = default;
 
     Aws::Utils::Logging::LogLevel GetLogLevel() const final { return Aws::Utils::Logging::LogLevel::Trace; }
 
     void Log(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * format_str, ...) final // NOLINT
     {
-        const auto & [level, prio] = convertLogLevel(log_level);
-        LOG_IMPL(log, level, prio, "{}: {}", tag, format_str);
+        callLogImpl(log_level, tag, format_str); /// FIXME. Variadic arguments?
     }
 
     void LogStream(Aws::Utils::Logging::LogLevel log_level, const char * tag, const Aws::OStringStream & message_stream) final
+    {
+        callLogImpl(log_level, tag, message_stream.str().c_str());
+    }
+
+    void callLogImpl(Aws::Utils::Logging::LogLevel log_level, const char * tag, const char * message)
     {
         const auto & [level, prio] = convertLogLevel(log_level);
-        LOG_IMPL(log, level, prio, "{}: {}", tag, message_stream.str());
+        if (tag_loggers.count(tag) > 0)
+        {
+            LOG_IMPL(tag_loggers[tag], level, prio, "{}", message);
+        }
+        else
+        {
+            LOG_IMPL(default_logger, level, prio, "{}: {}", tag, message);
+        }
     }
 
     void Flush() final {}
 
 private:
-    Poco::Logger * log = &Poco::Logger::get("AWSClient");
+    Poco::Logger * default_logger;
+    std::unordered_map<String, Poco::Logger *> tag_loggers;
 };
 
 class S3AuthSigner : public Aws::Client::AWSAuthV4Signer
@@ -102,8 +128,10 @@ public:
 private:
     const DB::HeaderCollection headers;
 };
+
 }
 
+
 namespace DB
 {
 namespace ErrorCodes
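
The AWSLogger change above routes messages whose tag is known to a dedicated logger and everything else to a default one. A standalone sketch of that lookup-with-fallback, with printf standing in for the Poco loggers (names are illustrative):

#include <cstdio>
#include <string>
#include <unordered_map>

struct Logger { std::string name; };

static Logger aws_client{"AWSClient"};
static Logger aws_signer{"AWSClient (AWSAuthV4Signer)"};
static std::unordered_map<std::string, Logger *> tag_loggers = {
    {"AWSClient", &aws_client},
    {"AWSAuthV4Signer", &aws_signer},
};
static Logger * default_logger = &aws_client;

static void log(const std::string & tag, const std::string & message)
{
    auto it = tag_loggers.find(tag);
    if (it != tag_loggers.end())
        std::printf("[%s] %s\n", it->second->name.c_str(), message.c_str());   // known tag: dedicated logger
    else
        std::printf("[%s] %s: %s\n", default_logger->name.c_str(), tag.c_str(), message.c_str());   // fallback
}

int main()
{
    log("AWSAuthV4Signer", "signing request");
    log("CurlHttpClient", "unknown tags fall back to the default logger");
    return 0;
}
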
@@ -1,4 +1,4 @@
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(OS_LINUX) || defined(__FreeBSD__)
 
 #include <IO/WriteBufferAIO.h>
 #include <Common/MemorySanitizer.h>
@@ -1,6 +1,6 @@
 #pragma once
 
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(OS_LINUX) || defined(__FreeBSD__)
 
 #include <IO/WriteBufferFromFileBase.h>
 #include <IO/WriteBuffer.h>
@@ -17,6 +17,11 @@
 # include <utility>
 
 
+namespace ProfileEvents
+{
+    extern const Event S3WriteBytes;
+}
+
 namespace DB
 {
 // S3 protocol does not allow to have multipart upload with more than 10000 parts.
@@ -59,6 +64,8 @@ void WriteBufferFromS3::nextImpl()
 
     temporary_buffer->write(working_buffer.begin(), offset());
 
+    ProfileEvents::increment(ProfileEvents::S3WriteBytes, offset());
+
     if (is_multipart)
     {
         last_part_size += offset();
@@ -1,6 +1,6 @@
 #include <IO/createReadBufferFromFileBase.h>
 #include <IO/ReadBufferFromFile.h>
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(OS_LINUX) || defined(__FreeBSD__)
 #include <IO/ReadBufferAIO.h>
 #endif
 #include <IO/MMapReadBufferFromFile.h>
@@ -24,7 +24,7 @@ std::unique_ptr<ReadBufferFromFileBase> createReadBufferFromFileBase(
     size_t estimated_size, size_t aio_threshold, size_t mmap_threshold,
     size_t buffer_size_, int flags_, char * existing_memory_, size_t alignment)
 {
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(OS_LINUX) || defined(__FreeBSD__)
     if (aio_threshold && estimated_size >= aio_threshold)
     {
         /// Attempt to open a file with O_DIRECT
@@ -1,6 +1,6 @@
 #include <IO/createWriteBufferFromFileBase.h>
 #include <IO/WriteBufferFromFile.h>
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(OS_LINUX) || defined(__FreeBSD__)
 #include <IO/WriteBufferAIO.h>
 #endif
 #include <Common/ProfileEvents.h>
@@ -20,7 +20,7 @@ std::unique_ptr<WriteBufferFromFileBase> createWriteBufferFromFileBase(const std
     size_t aio_threshold, size_t buffer_size_, int flags_, mode_t mode, char * existing_memory_,
     size_t alignment)
 {
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(OS_LINUX) || defined(__FreeBSD__)
     if (aio_threshold && estimated_size >= aio_threshold)
    {
         /// Attempt to open a file with O_DIRECT
@@ -368,7 +368,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
     }
 
     SetPtr prepared_set;
-    if (functionIsInOrGlobalInOperator(node.name))
+    if (checkFunctionIsInOrGlobalInOperator(node))
     {
         /// Let's find the type of the first argument (then getActionsImpl will be called again and will not affect anything).
         visit(node.arguments->children.at(0), data);
@@ -445,7 +445,7 @@ void ActionsMatcher::visit(const ASTFunction & node, const ASTPtr & ast, Data &
             /// Select the name in the next cycle.
             argument_names.emplace_back();
         }
-        else if (functionIsInOrGlobalInOperator(node.name) && arg == 1 && prepared_set)
+        else if (checkFunctionIsInOrGlobalInOperator(node) && arg == 1 && prepared_set)
         {
             ColumnWithTypeAndName column;
             column.type = std::make_shared<DataTypeSet>();
@@ -621,6 +621,7 @@ void Context::setConfig(const ConfigurationPtr & config)
 {
     auto lock = getLock();
     shared->config = config;
+    shared->access_control_manager.setExternalAuthenticatorsConfig(*shared->config);
 }
 
 const Poco::Util::AbstractConfiguration & Context::getConfigRef() const
@@ -640,6 +641,11 @@ const AccessControlManager & Context::getAccessControlManager() const
     return shared->access_control_manager;
 }
 
+void Context::setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config)
+{
+    auto lock = getLock();
+    shared->access_control_manager.setExternalAuthenticatorsConfig(config);
+}
+
 void Context::setUsersConfig(const ConfigurationPtr & config)
 {
@@ -245,6 +245,9 @@ public:
     AccessControlManager & getAccessControlManager();
     const AccessControlManager & getAccessControlManager() const;
 
+    /// Sets external authenticators config (LDAP).
+    void setExternalAuthenticatorsConfig(const Poco::Util::AbstractConfiguration & config);
+
     /** Take the list of users, quotas and configuration profiles from this config.
       * The list of users is completely replaced.
       * The accumulated quota values are not reset if the quota is not deleted.
@@ -180,7 +180,7 @@ void ExecuteScalarSubqueriesMatcher::visit(const ASTFunction & func, ASTPtr & as
     /// But if an argument is not subquery, than deeper may be scalar subqueries and we need to descend in them.
 
     std::vector<ASTPtr *> out;
-    if (functionIsInOrGlobalInOperator(func.name))
+    if (checkFunctionIsInOrGlobalInOperator(func))
     {
         for (auto & child : ast->children)
         {
Some files were not shown because too many files have changed in this diff.