Merge branch 'master' into processors-2

Nikolai Kochetov 2019-09-12 16:28:51 +03:00
commit 1319168a3a
119 changed files with 1568 additions and 526 deletions

View File

@ -1,3 +1,6 @@
project(ClickHouse)
cmake_minimum_required(VERSION 3.3)
foreach(policy
CMP0023
CMP0048 # CMake 3.0
@ -10,9 +13,6 @@ foreach(policy
endif()
endforeach()
project(ClickHouse)
cmake_minimum_required(VERSION 3.3)
# Ignore export() since we don't use it,
# but it gets broken with global targets via link_libraries()
macro (export)
@ -150,8 +150,12 @@ endif ()
if (LINKER_NAME)
message(STATUS "Using linker: ${LINKER_NAME} (selected from: LLD_PATH=${LLD_PATH}; GOLD_PATH=${GOLD_PATH}; COMPILER_POSTFIX=${COMPILER_POSTFIX})")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fuse-ld=${LINKER_NAME}")
endif ()
# Make sure the final executable has symbols exported
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -rdynamic")
cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) # Not available under freebsd
if(NOT AVAILABLE_PHYSICAL_MEMORY OR AVAILABLE_PHYSICAL_MEMORY GREATER 8000)
option(COMPILER_PIPE "-pipe compiler option [less /tmp usage, more ram usage]" ON)
@ -226,8 +230,8 @@ endif ()
# Make this extra-checks for correct library dependencies.
if (NOT SANITIZE)
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-undefined")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-undefined")
endif ()
include(cmake/dbms_glob_sources.cmake)
@ -343,7 +347,7 @@ include (cmake/find_hyperscan.cmake)
include (cmake/find_simdjson.cmake)
include (cmake/find_rapidjson.cmake)
include (cmake/find_fastops.cmake)
include (cmake/find_orc.cmake)
#include (cmake/find_orc.cmake)
find_contrib_lib(cityhash)
find_contrib_lib(farmhash)
@ -360,6 +364,29 @@ include (libs/libcommon/cmake/find_jemalloc.cmake)
include (libs/libcommon/cmake/find_cctz.cmake)
include (libs/libmysqlxx/cmake/find_mysqlclient.cmake)
# When testing for memory leaks with Valgrind, don't link tcmalloc or jemalloc.
if (USE_JEMALLOC)
message (STATUS "Link jemalloc: ${JEMALLOC_LIBRARIES}")
set (MALLOC_LIBRARIES ${JEMALLOC_LIBRARIES})
elseif (USE_TCMALLOC)
if (DEBUG_TCMALLOC AND NOT GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
message (FATAL_ERROR "Requested DEBUG_TCMALLOC but debug library is not found. You should install Google Perftools. Example: sudo apt-get install libgoogle-perftools-dev")
endif ()
if (DEBUG_TCMALLOC AND GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG)
message (STATUS "Link libtcmalloc_minimal_debug for testing: ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG}")
set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL_DEBUG})
else ()
message (STATUS "Link libtcmalloc_minimal: ${GPERFTOOLS_TCMALLOC_MINIMAL}")
set (MALLOC_LIBRARIES ${GPERFTOOLS_TCMALLOC_MINIMAL})
endif ()
elseif (SANITIZE)
message (STATUS "Will use ${SANITIZE} sanitizer.")
else ()
message (WARNING "Non-default allocator is disabled. This is not recommended for production Linux builds.")
endif ()
include (cmake/print_flags.cmake)
install (EXPORT global DESTINATION cmake)

View File

@ -12,8 +12,7 @@
# https://youtrack.jetbrains.com/issue/CPP-2659
# https://youtrack.jetbrains.com/issue/CPP-870
string(TOLOWER "${CMAKE_COMMAND}" CMAKE_COMMAND_LOWER)
if (NOT ${CMAKE_COMMAND_LOWER} MATCHES "clion")
if (NOT DEFINED ENV{CLION_IDE})
find_program(NINJA_PATH ninja)
if (NINJA_PATH)
set(CMAKE_GENERATOR "Ninja" CACHE INTERNAL "" FORCE)

View File

@ -13,9 +13,9 @@ ClickHouse is an open-source column-oriented database management system that all
* You can also [fill this form](https://forms.yandex.com/surveys/meet-yandex-clickhouse-team/) to meet Yandex ClickHouse team in person.
## Upcoming Events
* [ClickHouse Meetup in Moscow](https://yandex.ru/promo/clickhouse/moscow-2019) on September 5.
* [ClickHouse Meetup in Munich](https://www.meetup.com/ClickHouse-Meetup-Munich/events/264185199/) on September 17.
* [ClickHouse Meetup in Paris](https://www.eventbrite.com/e/clickhouse-paris-meetup-2019-registration-68493270215) on October 3.
* [ClickHouse Meetup in Hong Kong](https://www.meetup.com/Hong-Kong-Machine-Learning-Meetup/events/263580542/) on October 17.
* [ClickHouse Meetup in Shenzhen](https://www.huodongxing.com/event/3483759917300) on October 20.
* [ClickHouse Meetup in Shanghai](https://www.huodongxing.com/event/4483760336000) on October 27.
* [ClickHouse Meetup in Tokyo](https://clickhouse.connpass.com/event/147001/) on November 14.

View File

@ -1,20 +1,26 @@
option (USE_CAPNP "Enable Cap'n Proto" ON)
option (ENABLE_CAPNP "Enable Cap'n Proto" ON)
if (USE_CAPNP)
option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${NOT_UNBUNDLED})
if (ENABLE_CAPNP)
# FIXME: refactor to use `add_library( IMPORTED)` if possible.
if (NOT USE_INTERNAL_CAPNP_LIBRARY)
find_library (KJ kj)
find_library (CAPNP capnp)
find_library (CAPNPC capnpc)
option (USE_INTERNAL_CAPNP_LIBRARY "Set to FALSE to use system capnproto library instead of bundled" ${NOT_UNBUNDLED})
set (CAPNP_LIBRARIES ${CAPNPC} ${CAPNP} ${KJ})
else ()
add_subdirectory(contrib/capnproto-cmake)
# FIXME: refactor to use `add_library( IMPORTED)` if possible.
if (NOT USE_INTERNAL_CAPNP_LIBRARY)
find_library (KJ kj)
find_library (CAPNP capnp)
find_library (CAPNPC capnpc)
set (CAPNP_LIBRARIES capnpc)
endif ()
set (CAPNP_LIBRARIES ${CAPNPC} ${CAPNP} ${KJ})
else ()
add_subdirectory(contrib/capnproto-cmake)
message (STATUS "Using capnp: ${CAPNP_LIBRARIES}")
set (CAPNP_LIBRARIES capnpc)
endif ()
if (CAPNP_LIBRARIES)
set (USE_CAPNP 1)
endif ()
endif ()
message (STATUS "Using capnp: ${CAPNP_LIBRARIES}")

View File

@ -1,24 +1,29 @@
if (NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF)
option (ENABLE_HDFS "Enable HDFS" ${NOT_UNBUNDLED})
endif ()
if(NOT ARCH_ARM AND NOT OS_FREEBSD AND NOT APPLE AND USE_PROTOBUF)
option(ENABLE_HDFS "Enable HDFS" 1)
endif()
if (ENABLE_HDFS AND NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/hdfs.h")
message (WARNING "submodule contrib/libhdfs3 is missing. to fix try run: \n git submodule update --init --recursive")
set (ENABLE_HDFS 0)
endif ()
if(ENABLE_HDFS)
option(USE_INTERNAL_HDFS3_LIBRARY "Set to FALSE to use system HDFS3 instead of bundled" ${NOT_UNBUNDLED})
if (ENABLE_HDFS)
option (USE_INTERNAL_HDFS3_LIBRARY "Set to FALSE to use system HDFS3 instead of bundled" ON)
if(NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include/hdfs/hdfs.h")
if(USE_INTERNAL_HDFS3_LIBRARY)
message(WARNING "submodule contrib/libhdfs3 is missing. to fix try run: \n git submodule update --init --recursive")
endif()
set(MISSING_INTERNAL_HDFS3_LIBRARY 1)
set(USE_INTERNAL_HDFS3_LIBRARY 0)
endif()
if (NOT USE_INTERNAL_HDFS3_LIBRARY)
find_package(hdfs3)
endif ()
if(NOT USE_INTERNAL_HDFS3_LIBRARY)
find_library(HDFS3_LIBRARY hdfs3)
find_path(HDFS3_INCLUDE_DIR NAMES hdfs/hdfs.h PATHS ${HDFS3_INCLUDE_PATHS})
endif()
if (HDFS3_LIBRARY AND HDFS3_INCLUDE_DIR)
if(HDFS3_LIBRARY AND HDFS3_INCLUDE_DIR)
set(USE_HDFS 1)
elseif (LIBGSASL_LIBRARY AND LIBXML2_LIBRARY)
elseif(NOT MISSING_INTERNAL_HDFS3_LIBRARY AND LIBGSASL_LIBRARY AND LIBXML2_LIBRARY)
set(HDFS3_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libhdfs3/include")
set(HDFS3_LIBRARY hdfs3)
set(USE_INTERNAL_HDFS3_LIBRARY 1)
set(USE_HDFS 1)
else()
set(USE_INTERNAL_HDFS3_LIBRARY 0)
@ -26,4 +31,4 @@ endif()
endif()
message (STATUS "Using hdfs3=${USE_HDFS}: ${HDFS3_INCLUDE_DIR} : ${HDFS3_LIBRARY}")
message(STATUS "Using hdfs3=${USE_HDFS}: ${HDFS3_INCLUDE_DIR} : ${HDFS3_LIBRARY}")

View File

@ -18,22 +18,12 @@ if (ENABLE_EMBEDDED_COMPILER)
elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
find_package(LLVM ${CMAKE_CXX_COMPILER_VERSION} CONFIG PATHS ${LLVM_PATHS})
else ()
#TODO:
#if(NOT LLVM_FOUND)
# find_package(LLVM 9 CONFIG PATHS ${LLVM_PATHS})
#endif()
#if(NOT LLVM_FOUND)
# find_package(LLVM 8 CONFIG PATHS ${LLVM_PATHS})
#endif()
if (NOT LLVM_FOUND)
find_package (LLVM 7 CONFIG PATHS ${LLVM_PATHS})
endif ()
if (NOT LLVM_FOUND)
find_package (LLVM 6 CONFIG PATHS ${LLVM_PATHS})
endif ()
if (NOT LLVM_FOUND)
find_package (LLVM 5 CONFIG PATHS ${LLVM_PATHS})
endif ()
# TODO: 9 8
foreach(llvm_v 7.1 7 6 5)
if (NOT LLVM_FOUND)
find_package (LLVM ${llvm_v} CONFIG PATHS ${LLVM_PATHS})
endif ()
endforeach ()
endif ()
if (LLVM_FOUND)

View File

@ -1,8 +1,38 @@
## TODO: replace the hardcoded logic with a find procedure
option (ENABLE_ORC "Enable ORC" 1)
set(USE_ORC 0)
set(USE_INTERNAL_ORC_LIBRARY ON)
if(ENABLE_ORC)
option (USE_INTERNAL_ORC_LIBRARY "Set to FALSE to use system ORC instead of bundled" ${NOT_UNBUNDLED})
if (ARROW_LIBRARY)
if (NOT EXISTS "${ClickHouse_SOURCE_DIR}/contrib/orc/c++/include/orc/OrcFile.hh")
if(USE_INTERNAL_ORC_LIBRARY)
message(WARNING "submodule contrib/orc is missing. to fix try run: \n git submodule update --init --recursive")
set(USE_INTERNAL_ORC_LIBRARY 0)
endif()
set(MISSING_INTERNAL_ORC_LIBRARY 1)
endif ()
if (NOT USE_INTERNAL_ORC_LIBRARY)
find_package(orc)
endif ()
#if (USE_INTERNAL_ORC_LIBRARY)
#find_path(CYRUS_SASL_INCLUDE_DIR sasl/sasl.h)
#find_library(CYRUS_SASL_SHARED_LIB sasl2)
#if (NOT CYRUS_SASL_INCLUDE_DIR OR NOT CYRUS_SASL_SHARED_LIB)
# set(USE_ORC 0)
#endif()
#endif()
if (ORC_LIBRARY AND ORC_INCLUDE_DIR)
set(USE_ORC 1)
endif()
elseif(NOT MISSING_INTERNAL_ORC_LIBRARY AND ARROW_LIBRARY) # (LIBGSASL_LIBRARY AND LIBXML2_LIBRARY)
set(ORC_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/orc/c++/include")
set(ORC_LIBRARY orc)
set(USE_ORC 1)
else()
set(USE_INTERNAL_ORC_LIBRARY 0)
endif()
endif()
message (STATUS "Using internal=${USE_INTERNAL_ORC_LIBRARY} orc=${USE_ORC}: ${ORC_INCLUDE_DIR} : ${ORC_LIBRARY}")

View File

@ -62,6 +62,7 @@ elseif(NOT MISSING_INTERNAL_PARQUET_LIBRARY AND NOT OS_FREEBSD)
endif()
set(USE_PARQUET 1)
set(USE_ORC 1)
endif()
endif()

View File

@ -10,19 +10,6 @@ endif ()
set_property(DIRECTORY PROPERTY EXCLUDE_FROM_ALL 1)
if (USE_INTERNAL_ORC_LIBRARY)
set(BUILD_JAVA OFF)
set (ANALYZE_JAVA OFF)
set (BUILD_CPP_TESTS OFF)
set (BUILD_TOOLS OFF)
option(BUILD_JAVA OFF)
option (ANALYZE_JAVA OFF)
option (BUILD_CPP_TESTS OFF)
option (BUILD_TOOLS OFF)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/contrib/orc/cmake_modules")
add_subdirectory(orc)
endif()
if (USE_INTERNAL_BOOST_LIBRARY)
add_subdirectory (boost-cmake)
endif ()
@ -327,3 +314,7 @@ endif()
if (USE_FASTOPS)
add_subdirectory (fastops-cmake)
endif()
#if (USE_INTERNAL_ORC_LIBRARY)
# add_subdirectory(orc-cmake)
#endif ()

View File

@ -56,11 +56,11 @@ set(ORC_SOURCE_WRAP_DIR ${ORC_SOURCE_DIR}/wrap)
set(ORC_BUILD_SRC_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/src)
set(ORC_BUILD_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/../orc/c++/include)
set(GOOGLE_PROTOBUF_DIR ${ClickHouse_SOURCE_DIR}/contrib/protobuf/src/)
set(GOOGLE_PROTOBUF_DIR ${Protobuf_INCLUDE_DIR}/)
set(ORC_ADDITION_SOURCE_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(ARROW_SRC_DIR ${ClickHouse_SOURCE_DIR}/contrib/arrow/cpp/src)
set(PROTOBUF_EXECUTABLE ${CMAKE_CURRENT_BINARY_DIR}/../protobuf/cmake/protoc)
set(PROTOBUF_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE})
set(PROTO_DIR ${ORC_SOURCE_DIR}/../proto)
@ -70,14 +70,10 @@ add_custom_command(OUTPUT orc_proto.pb.h orc_proto.pb.cc
--cpp_out="${CMAKE_CURRENT_BINARY_DIR}"
"${PROTO_DIR}/orc_proto.proto")
include_directories(SYSTEM ${ORC_INCLUDE_DIR})
include_directories(SYSTEM ${ORC_SOURCE_SRC_DIR})
include_directories(SYSTEM ${ORC_SOURCE_WRAP_DIR})
include_directories(SYSTEM ${GOOGLE_PROTOBUF_DIR})
include_directories(SYSTEM ${ORC_BUILD_SRC_DIR})
include_directories(SYSTEM ${ORC_BUILD_INCLUDE_DIR})
include_directories(SYSTEM ${ORC_ADDITION_SOURCE_DIR})
include_directories(SYSTEM ${ARROW_SRC_DIR})
include(${ClickHouse_SOURCE_DIR}/contrib/orc/cmake_modules/CheckSourceCompiles.cmake)
include(orc_check.cmake)
configure_file("${ORC_INCLUDE_DIR}/orc/orc-config.hh.in" "${ORC_BUILD_INCLUDE_DIR}/orc/orc-config.hh")
configure_file("${ORC_SOURCE_SRC_DIR}/Adaptor.hh.in" "${ORC_BUILD_INCLUDE_DIR}/Adaptor.hh")
set(ORC_SRCS
@ -232,6 +228,14 @@ if (ARROW_WITH_ZSTD)
target_link_libraries(${ARROW_LIBRARY} PRIVATE ${ZSTD_LIBRARY})
endif()
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_SOURCE_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_SOURCE_WRAP_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${GOOGLE_PROTOBUF_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_BUILD_SRC_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_BUILD_INCLUDE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ORC_ADDITION_SOURCE_DIR})
target_include_directories(${ARROW_LIBRARY} PRIVATE SYSTEM ${ARROW_SRC_DIR})
# === parquet

View File

@ -0,0 +1,130 @@
# Unchanged part of contrib/orc/c++/src/CMakeLists.txt
INCLUDE(CheckCXXSourceCompiles)
CHECK_CXX_SOURCE_COMPILES("
#include<fcntl.h>
#include<unistd.h>
int main(int,char*[]){
int f = open(\"/x/y\", O_RDONLY);
char buf[100];
return pread(f, buf, 100, 1000) == 0;
}"
HAS_PREAD
)
CHECK_CXX_SOURCE_COMPILES("
#include<time.h>
int main(int,char*[]){
struct tm time2020;
return !strptime(\"2020-02-02 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2020);
}"
HAS_STRPTIME
)
CHECK_CXX_SOURCE_COMPILES("
#include<string>
int main(int,char* argv[]){
return static_cast<int>(std::stoll(argv[0]));
}"
HAS_STOLL
)
CHECK_CXX_SOURCE_COMPILES("
#include<stdint.h>
#include<stdio.h>
int main(int,char*[]){
int64_t x = 1; printf(\"%lld\",x);
}"
INT64_IS_LL
)
CHECK_CXX_SOURCE_COMPILES("
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored \"-Wdeprecated\"
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored \"-Wdeprecated\"
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4996 )
#pragma warning( pop )
#else
unknownCompiler!
#endif
int main(int, char *[]) {}"
HAS_DIAGNOSTIC_PUSH
)
CHECK_CXX_SOURCE_COMPILES("
#include<cmath>
int main(int, char *[]) {
return std::isnan(1.0f);
}"
HAS_STD_ISNAN
)
CHECK_CXX_SOURCE_COMPILES("
#include<mutex>
int main(int, char *[]) {
std::mutex test_mutex;
std::lock_guard<std::mutex> lock_mutex(test_mutex);
}"
HAS_STD_MUTEX
)
CHECK_CXX_SOURCE_COMPILES("
#include<string>
std::string func() {
std::string var = \"test\";
return std::move(var);
}
int main(int, char *[]) {}"
NEEDS_REDUNDANT_MOVE
)
INCLUDE(CheckCXXSourceRuns)
CHECK_CXX_SOURCE_RUNS("
#include<time.h>
int main(int, char *[]) {
time_t t = -14210715; // 1969-07-20 12:34:45
struct tm *ptm = gmtime(&t);
return !(ptm && ptm->tm_year == 69);
}"
HAS_PRE_1970
)
CHECK_CXX_SOURCE_RUNS("
#include<stdlib.h>
#include<time.h>
int main(int, char *[]) {
setenv(\"TZ\", \"America/Los_Angeles\", 1);
tzset();
struct tm time2037;
struct tm time2038;
strptime(\"2037-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2037);
strptime(\"2038-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2038);
return mktime(&time2038) - mktime(&time2037) != 31536000;
}"
HAS_POST_2038
)
set(CMAKE_REQUIRED_INCLUDES ${ZLIB_INCLUDE_DIR})
set(CMAKE_REQUIRED_LIBRARIES zlib)
CHECK_CXX_SOURCE_COMPILES("
#define Z_PREFIX
#include<zlib.h>
z_stream strm;
int main(int, char *[]) {
deflateReset(&strm);
}"
NEEDS_Z_PREFIX
)
# See https://cmake.org/cmake/help/v3.14/policy/CMP0075.html. Without unsetting these variables the thrift build breaks.
set(CMAKE_REQUIRED_INCLUDES)
set(CMAKE_REQUIRED_LIBRARIES)

View File

@ -199,17 +199,17 @@ if (WITH_KERBEROS)
endif()
target_include_directories(hdfs3 PRIVATE ${LIBXML2_INCLUDE_DIR})
target_link_libraries(hdfs3 ${LIBGSASL_LIBRARY})
target_link_libraries(hdfs3 PRIVATE ${LIBGSASL_LIBRARY})
if (WITH_KERBEROS)
target_link_libraries(hdfs3 ${KERBEROS_LIBRARIES})
target_link_libraries(hdfs3 PRIVATE ${KERBEROS_LIBRARIES})
endif()
target_link_libraries(hdfs3 ${LIBXML2_LIBRARY})
target_link_libraries(hdfs3 PRIVATE ${LIBXML2_LIBRARY})
# inherit from parent cmake
target_include_directories(hdfs3 PRIVATE ${Boost_INCLUDE_DIRS})
target_include_directories(hdfs3 PRIVATE ${Protobuf_INCLUDE_DIR})
target_link_libraries(hdfs3 ${Protobuf_LIBRARY})
target_link_libraries(hdfs3 PRIVATE ${Protobuf_LIBRARY})
if(OPENSSL_INCLUDE_DIR AND OPENSSL_LIBRARIES)
target_include_directories(hdfs3 PRIVATE ${OPENSSL_INCLUDE_DIR})
target_link_libraries(hdfs3 ${OPENSSL_LIBRARIES})
target_link_libraries(hdfs3 PRIVATE ${OPENSSL_LIBRARIES})
endif()

View File

@ -0,0 +1,229 @@
# Modified copy of contrib/orc/c++/src/CMakeLists.txt
set(LIBRARY_INCLUDE ${ClickHouse_SOURCE_DIR}/contrib/orc/c++/include)
set(LIBRARY_DIR ${ClickHouse_SOURCE_DIR}/contrib/orc/c++/src)
set(PROTOBUF_INCLUDE_DIR ${Protobuf_INCLUDE_DIR})
set(PROTOBUF_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE})
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CXX11_FLAGS} ${WARN_FLAGS}")
INCLUDE(CheckCXXSourceCompiles)
CHECK_CXX_SOURCE_COMPILES("
#include<fcntl.h>
#include<unistd.h>
int main(int,char*[]){
int f = open(\"/x/y\", O_RDONLY);
char buf[100];
return pread(f, buf, 100, 1000) == 0;
}"
HAS_PREAD
)
CHECK_CXX_SOURCE_COMPILES("
#include<time.h>
int main(int,char*[]){
struct tm time2020;
return !strptime(\"2020-02-02 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2020);
}"
HAS_STRPTIME
)
CHECK_CXX_SOURCE_COMPILES("
#include<string>
int main(int,char* argv[]){
return static_cast<int>(std::stoll(argv[0]));
}"
HAS_STOLL
)
CHECK_CXX_SOURCE_COMPILES("
#include<stdint.h>
#include<stdio.h>
int main(int,char*[]){
int64_t x = 1; printf(\"%lld\",x);
}"
INT64_IS_LL
)
CHECK_CXX_SOURCE_COMPILES("
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored \"-Wdeprecated\"
#pragma clang diagnostic pop
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored \"-Wdeprecated\"
#pragma GCC diagnostic pop
#elif defined(_MSC_VER)
#pragma warning( push )
#pragma warning( disable : 4996 )
#pragma warning( pop )
#else
unknownCompiler!
#endif
int main(int, char *[]) {}"
HAS_DIAGNOSTIC_PUSH
)
CHECK_CXX_SOURCE_COMPILES("
#include<cmath>
int main(int, char *[]) {
return std::isnan(1.0f);
}"
HAS_STD_ISNAN
)
CHECK_CXX_SOURCE_COMPILES("
#include<mutex>
int main(int, char *[]) {
std::mutex test_mutex;
std::lock_guard<std::mutex> lock_mutex(test_mutex);
}"
HAS_STD_MUTEX
)
CHECK_CXX_SOURCE_COMPILES("
#include<string>
std::string func() {
std::string var = \"test\";
return std::move(var);
}
int main(int, char *[]) {}"
NEEDS_REDUNDANT_MOVE
)
INCLUDE(CheckCXXSourceRuns)
CHECK_CXX_SOURCE_RUNS("
#include<time.h>
int main(int, char *[]) {
time_t t = -14210715; // 1969-07-20 12:34:45
struct tm *ptm = gmtime(&t);
return !(ptm && ptm->tm_year == 69);
}"
HAS_PRE_1970
)
CHECK_CXX_SOURCE_RUNS("
#include<stdlib.h>
#include<time.h>
int main(int, char *[]) {
setenv(\"TZ\", \"America/Los_Angeles\", 1);
tzset();
struct tm time2037;
struct tm time2038;
strptime(\"2037-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2037);
strptime(\"2038-05-05 12:34:56\", \"%Y-%m-%d %H:%M:%S\", &time2038);
return mktime(&time2038) - mktime(&time2037) != 31536000;
}"
HAS_POST_2038
)
set(CMAKE_REQUIRED_INCLUDES ${ZLIB_INCLUDE_DIR})
set(CMAKE_REQUIRED_LIBRARIES zlib)
CHECK_CXX_SOURCE_COMPILES("
#define Z_PREFIX
#include<zlib.h>
z_stream strm;
int main(int, char *[]) {
deflateReset(&strm);
}"
NEEDS_Z_PREFIX
)
configure_file (
"${LIBRARY_DIR}/Adaptor.hh.in"
"${CMAKE_CURRENT_BINARY_DIR}/Adaptor.hh"
)
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h ${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.cc
COMMAND ${PROTOBUF_EXECUTABLE}
-I${ClickHouse_SOURCE_DIR}/contrib/orc/proto
--cpp_out="${CMAKE_CURRENT_BINARY_DIR}"
"${ClickHouse_SOURCE_DIR}/contrib/orc/proto/orc_proto.proto"
)
set(SOURCE_FILES
"${CMAKE_CURRENT_BINARY_DIR}/Adaptor.hh"
${CMAKE_CURRENT_BINARY_DIR}/orc_proto.pb.h
${LIBRARY_DIR}/io/InputStream.cc
${LIBRARY_DIR}/io/OutputStream.cc
${LIBRARY_DIR}/wrap/orc-proto-wrapper.cc
${LIBRARY_DIR}/Adaptor.cc
${LIBRARY_DIR}/ByteRLE.cc
${LIBRARY_DIR}/ColumnPrinter.cc
${LIBRARY_DIR}/ColumnReader.cc
${LIBRARY_DIR}/ColumnWriter.cc
${LIBRARY_DIR}/Common.cc
${LIBRARY_DIR}/Compression.cc
${LIBRARY_DIR}/Exceptions.cc
${LIBRARY_DIR}/Int128.cc
${LIBRARY_DIR}/LzoDecompressor.cc
${LIBRARY_DIR}/MemoryPool.cc
${LIBRARY_DIR}/OrcFile.cc
${LIBRARY_DIR}/Reader.cc
${LIBRARY_DIR}/RLEv1.cc
${LIBRARY_DIR}/RLEv2.cc
${LIBRARY_DIR}/RLE.cc
${LIBRARY_DIR}/Statistics.cc
${LIBRARY_DIR}/StripeStream.cc
${LIBRARY_DIR}/Timezone.cc
${LIBRARY_DIR}/TypeImpl.cc
${LIBRARY_DIR}/Vector.cc
${LIBRARY_DIR}/Writer.cc
)
if(ORC_CXX_HAS_THREAD_LOCAL AND BUILD_LIBHDFSPP)
set(SOURCE_FILES ${SOURCE_FILES} ${LIBRARY_DIR}/OrcHdfsFile.cc)
endif(ORC_CXX_HAS_THREAD_LOCAL AND BUILD_LIBHDFSPP)
#list(TRANSFORM SOURCE_FILES PREPEND ${LIBRARY_DIR}/)
configure_file (
"${LIBRARY_INCLUDE}/orc/orc-config.hh.in"
"${CMAKE_CURRENT_BINARY_DIR}/orc/orc-config.hh"
)
add_library (orc ${SOURCE_FILES})
target_include_directories (orc
PRIVATE
${LIBRARY_INCLUDE}
${LIBRARY_DIR}
#PUBLIC
${CMAKE_CURRENT_BINARY_DIR}
PRIVATE
${PROTOBUF_INCLUDE_DIR}
${ZLIB_INCLUDE_DIR}
${SNAPPY_INCLUDE_DIR}
${LZ4_INCLUDE_DIR}
${LIBHDFSPP_INCLUDE_DIR}
)
target_link_libraries (orc PRIVATE
${Protobuf_LIBRARY}
${ZLIB_LIBRARIES}
${SNAPPY_LIBRARY}
${LZ4_LIBRARY}
${LIBHDFSPP_LIBRARIES}
)
#install(TARGETS orc DESTINATION lib)
if(ORC_CXX_HAS_THREAD_LOCAL AND BUILD_LIBHDFSPP)
add_definitions(-DBUILD_LIBHDFSPP)
endif(ORC_CXX_HAS_THREAD_LOCAL AND BUILD_LIBHDFSPP)

View File

@ -101,28 +101,6 @@ add_headers_and_sources(clickhouse_common_io src/Common)
add_headers_and_sources(clickhouse_common_io src/Common/HashTable)
add_headers_and_sources(clickhouse_common_io src/IO)
add_headers_and_sources(dbms src/Core)
add_headers_and_sources(dbms src/Compression/)
add_headers_and_sources(dbms src/DataStreams)
add_headers_and_sources(dbms src/DataTypes)
add_headers_and_sources(dbms src/Databases)
add_headers_and_sources(dbms src/Interpreters)
add_headers_and_sources(dbms src/Interpreters/ClusterProxy)
add_headers_and_sources(dbms src/Columns)
add_headers_and_sources(dbms src/Storages)
add_headers_and_sources(dbms src/Storages/Distributed)
add_headers_and_sources(dbms src/Storages/MergeTree)
add_headers_and_sources(dbms src/Storages/LiveView)
add_headers_and_sources(dbms src/Client)
add_headers_and_sources(dbms src/Formats)
add_headers_and_sources(dbms src/Processors)
add_headers_and_sources(dbms src/Processors/Executors)
add_headers_and_sources(dbms src/Processors/Formats)
add_headers_and_sources(dbms src/Processors/Formats/Impl)
add_headers_and_sources(dbms src/Processors/Transforms)
add_headers_and_sources(dbms src/Processors/Sources)
add_headers_only(dbms src/Server)
if(USE_RDKAFKA)
add_headers_and_sources(dbms src/Storages/Kafka)
endif()
@ -160,23 +138,71 @@ if (OS_FREEBSD)
target_compile_definitions (clickhouse_common_io PUBLIC CLOCK_MONOTONIC_COARSE=CLOCK_MONOTONIC_FAST)
endif ()
if (USE_UNWIND)
target_link_libraries (clickhouse_common_io PRIVATE ${UNWIND_LIBRARIES})
endif ()
add_subdirectory(src/Common/ZooKeeper)
add_subdirectory(src/Common/Config)
set (all_modules)
macro(add_object_library name common_path)
if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
add_glob(dbms_headers RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${common_path}/*.h)
add_glob(dbms_sources ${common_path}/*.cpp ${common_path}/*.c ${common_path}/*.h)
else ()
list (APPEND all_modules ${name})
add_glob(${name}_headers RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ${common_path}/*.h)
add_glob(${name}_sources ${common_path}/*.cpp ${common_path}/*.c ${common_path}/*.h)
add_library(${name} SHARED ${${name}_sources} ${${name}_headers})
target_link_libraries (${name} PRIVATE -Wl,--unresolved-symbols=ignore-all)
endif ()
endmacro()
add_object_library(clickhouse_core src/Core)
add_object_library(clickhouse_compression src/Compression/)
add_object_library(clickhouse_datastreams src/DataStreams)
add_object_library(clickhouse_datatypes src/DataTypes)
add_object_library(clickhouse_databases src/Databases)
add_object_library(clickhouse_interpreters src/Interpreters)
add_object_library(clickhouse_interpreters_clusterproxy src/Interpreters/ClusterProxy)
add_object_library(clickhouse_columns src/Columns)
add_object_library(clickhouse_storages src/Storages)
add_object_library(clickhouse_storages_distributed src/Storages/Distributed)
add_object_library(clickhouse_storages_mergetree src/Storages/MergeTree)
add_object_library(clickhouse_storages_liveview src/Storages/LiveView)
add_object_library(clickhouse_client src/Client)
add_object_library(clickhouse_formats src/Formats)
add_object_library(clickhouse_processors src/Processors)
add_object_library(clickhouse_processors_executors src/Processors/Executors)
add_object_library(clickhouse_processors_formats src/Processors/Formats)
add_object_library(clickhouse_processors_formats_impl src/Processors/Formats/Impl)
add_object_library(clickhouse_processors_transforms src/Processors/Transforms)
add_object_library(clickhouse_processors_sources src/Processors/Sources)
if (MAKE_STATIC_LIBRARIES OR NOT SPLIT_SHARED_LIBRARIES)
add_library(dbms ${dbms_headers} ${dbms_sources})
else ()
add_library(dbms SHARED ${dbms_headers} ${dbms_sources})
add_library (dbms STATIC ${dbms_headers} ${dbms_sources})
set (all_modules dbms)
else()
add_library (dbms SHARED ${dbms_headers} ${dbms_sources})
target_link_libraries (dbms PUBLIC ${all_modules})
list (APPEND all_modules dbms)
# force all split libs to be linked
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--no-as-needed")
endif ()
macro (dbms_target_include_directories)
foreach (module ${all_modules})
target_include_directories (${module} ${ARGN})
endforeach ()
endmacro ()
macro (dbms_target_link_libraries)
foreach (module ${all_modules})
target_link_libraries (${module} ${ARGN})
endforeach ()
endmacro ()
if (USE_EMBEDDED_COMPILER)
llvm_libs_all(REQUIRED_LLVM_LIBRARIES)
target_link_libraries (dbms PRIVATE ${REQUIRED_LLVM_LIBRARIES})
target_include_directories (dbms SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
dbms_target_link_libraries (PRIVATE ${REQUIRED_LLVM_LIBRARIES})
dbms_target_include_directories (SYSTEM BEFORE PUBLIC ${LLVM_INCLUDE_DIRS})
endif ()
if (CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE" OR CMAKE_BUILD_TYPE_UC STREQUAL "RELWITHDEBINFO" OR CMAKE_BUILD_TYPE_UC STREQUAL "MINSIZEREL")
@ -245,9 +271,9 @@ target_link_libraries(clickhouse_common_io
)
if (USE_RDKAFKA)
target_link_libraries(dbms PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY})
dbms_target_link_libraries(PRIVATE ${CPPKAFKA_LIBRARY} ${RDKAFKA_LIBRARY})
if(NOT USE_INTERNAL_RDKAFKA_LIBRARY)
target_include_directories(dbms SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR})
dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${RDKAFKA_INCLUDE_DIR})
endif()
endif()
@ -264,7 +290,7 @@ if(CPUINFO_LIBRARY)
target_link_libraries(clickhouse_common_io PRIVATE ${CPUINFO_LIBRARY})
endif()
target_link_libraries (dbms
dbms_target_link_libraries (
PRIVATE
clickhouse_parsers
clickhouse_common_config
@ -285,21 +311,24 @@ target_link_libraries (dbms
${Boost_SYSTEM_LIBRARY}
)
target_include_directories(dbms PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include)
target_include_directories(clickhouse_common_io PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include) # uses some includes from core
target_include_directories(dbms SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR})
target_include_directories(dbms SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
dbms_target_include_directories(PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/src/Core/include)
target_include_directories(clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
dbms_target_include_directories(SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
dbms_target_include_directories(SYSTEM BEFORE PUBLIC ${PDQSORT_INCLUDE_DIR})
if (NOT USE_INTERNAL_LZ4_LIBRARY)
target_include_directories(dbms SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR})
dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${LZ4_INCLUDE_DIR})
endif ()
if (ZSTD_LIBRARY)
target_link_libraries(dbms PRIVATE ${ZSTD_LIBRARY})
dbms_target_link_libraries(PRIVATE ${ZSTD_LIBRARY})
if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR)
dbms_target_include_directories(SYSTEM BEFORE PRIVATE ${ZSTD_INCLUDE_DIR})
endif ()
endif()
if (NOT USE_INTERNAL_ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR)
target_include_directories(dbms SYSTEM BEFORE PRIVATE ${ZSTD_INCLUDE_DIR})
endif ()
if (NOT USE_INTERNAL_BOOST_LIBRARY)
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${Boost_INCLUDE_DIRS})
@ -307,68 +336,69 @@ endif ()
if (Poco_SQL_FOUND AND NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
target_include_directories (dbms SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM PRIVATE ${Poco_SQL_INCLUDE_DIR})
endif()
if (USE_POCO_SQLODBC)
target_link_libraries (clickhouse_common_io PRIVATE ${Poco_SQL_LIBRARY})
target_link_libraries (dbms PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY})
dbms_target_link_libraries (PRIVATE ${Poco_SQLODBC_LIBRARY} ${Poco_SQL_LIBRARY})
if (NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQL_INCLUDE_DIR})
target_include_directories (dbms SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR} SYSTEM PUBLIC ${Poco_SQL_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_SQLODBC_INCLUDE_DIR} SYSTEM PUBLIC ${Poco_SQL_INCLUDE_DIR})
endif()
endif()
if (Poco_Data_FOUND)
target_include_directories (clickhouse_common_io SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR})
target_include_directories (dbms SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM PRIVATE ${Poco_Data_INCLUDE_DIR})
endif()
if (USE_POCO_DATAODBC)
target_link_libraries (clickhouse_common_io PRIVATE ${Poco_Data_LIBRARY})
target_link_libraries (dbms PRIVATE ${Poco_DataODBC_LIBRARY})
dbms_target_link_libraries (PRIVATE ${Poco_DataODBC_LIBRARY})
if (NOT USE_INTERNAL_POCO_LIBRARY)
target_include_directories (dbms SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM PRIVATE ${ODBC_INCLUDE_DIRS} ${Poco_DataODBC_INCLUDE_DIR})
endif()
endif()
if (USE_POCO_MONGODB)
target_link_libraries (dbms PRIVATE ${Poco_MongoDB_LIBRARY})
dbms_target_link_libraries (PRIVATE ${Poco_MongoDB_LIBRARY})
endif()
if (USE_POCO_NETSSL)
target_link_libraries (clickhouse_common_io PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
target_link_libraries (dbms PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
dbms_target_link_libraries (PRIVATE ${Poco_NetSSL_LIBRARY} ${Poco_Crypto_LIBRARY})
endif()
target_link_libraries (dbms PRIVATE ${Poco_Foundation_LIBRARY})
dbms_target_link_libraries (PRIVATE ${Poco_Foundation_LIBRARY})
if (USE_ICU)
target_link_libraries (dbms PRIVATE ${ICU_LIBRARIES})
target_include_directories (dbms SYSTEM PRIVATE ${ICU_INCLUDE_DIRS})
dbms_target_link_libraries (PRIVATE ${ICU_LIBRARIES})
dbms_target_include_directories (SYSTEM PRIVATE ${ICU_INCLUDE_DIRS})
endif ()
if (USE_CAPNP)
target_link_libraries (dbms PRIVATE ${CAPNP_LIBRARIES})
dbms_target_link_libraries (PRIVATE ${CAPNP_LIBRARIES})
endif ()
if (USE_PARQUET)
target_link_libraries(dbms PRIVATE ${PARQUET_LIBRARY})
dbms_target_link_libraries(PRIVATE ${PARQUET_LIBRARY})
if (NOT USE_INTERNAL_PARQUET_LIBRARY OR USE_INTERNAL_PARQUET_LIBRARY_NATIVE_CMAKE)
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${PARQUET_INCLUDE_DIR} ${ARROW_INCLUDE_DIR})
endif ()
endif ()
if(OPENSSL_CRYPTO_LIBRARY)
target_link_libraries(dbms PRIVATE ${OPENSSL_CRYPTO_LIBRARY})
if (OPENSSL_CRYPTO_LIBRARY)
dbms_target_link_libraries (PRIVATE ${OPENSSL_CRYPTO_LIBRARY})
target_link_libraries (clickhouse_common_io PRIVATE ${OPENSSL_CRYPTO_LIBRARY})
endif ()
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR})
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${DIVIDE_INCLUDE_DIR})
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${SPARCEHASH_INCLUDE_DIR})
if (USE_PROTOBUF)
target_link_libraries (dbms PRIVATE ${Protobuf_LIBRARY})
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR})
dbms_target_link_libraries (PRIVATE ${Protobuf_LIBRARY})
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${Protobuf_INCLUDE_DIR})
endif ()
if (USE_HDFS)
@ -382,13 +412,13 @@ if (USE_BROTLI)
endif()
if (USE_JEMALLOC)
target_include_directories (dbms SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) # used in Interpreters/AsynchronousMetrics.cpp
dbms_target_include_directories (SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) # used in Interpreters/AsynchronousMetrics.cpp
target_include_directories (clickhouse_common_io SYSTEM BEFORE PRIVATE ${JEMALLOC_INCLUDE_DIR}) # new_delete.cpp
endif ()
target_include_directories (dbms PUBLIC ${DBMS_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src/Formats/include)
dbms_target_include_directories (PUBLIC ${DBMS_INCLUDE_DIR} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/src/Formats/include)
target_include_directories (clickhouse_common_io PUBLIC ${DBMS_INCLUDE_DIR})
target_include_directories (clickhouse_common_io SYSTEM PUBLIC ${PCG_RANDOM_INCLUDE_DIR})
target_include_directories (clickhouse_common_io SYSTEM BEFORE PUBLIC ${DOUBLE_CONVERSION_INCLUDE_DIR})
# also for copy_headers.sh:

View File

@ -24,9 +24,9 @@ configure_file (config_tools.h.in ${CMAKE_CURRENT_BINARY_DIR}/config_tools.h)
macro(clickhouse_target_link_split_lib target name)
if(NOT CLICKHOUSE_ONE_SHARED)
target_link_libraries(${target} PRIVATE clickhouse-${name}-lib)
target_link_libraries(${target} PRIVATE clickhouse-${name}-lib ${MALLOC_LIBRARIES})
else()
target_link_libraries(${target} PRIVATE clickhouse-lib)
target_link_libraries(${target} PRIVATE clickhouse-lib ${MALLOC_LIBRARIES})
endif()
endmacro()
@ -111,7 +111,7 @@ if (CLICKHOUSE_SPLIT_BINARY)
install(PROGRAMS clickhouse-split-helper DESTINATION ${CMAKE_INSTALL_BINDIR} RENAME clickhouse COMPONENT clickhouse)
else ()
add_executable (clickhouse main.cpp)
target_link_libraries (clickhouse PRIVATE clickhouse_common_io string_utils)
target_link_libraries (clickhouse PRIVATE clickhouse_common_io string_utils ${MALLOC_LIBRARIES})
target_include_directories (clickhouse BEFORE PRIVATE ${COMMON_INCLUDE_DIR})
target_include_directories (clickhouse PRIVATE ${CMAKE_CURRENT_BINARY_DIR})

View File

@ -563,9 +563,17 @@ private:
if (is_interactive)
{
std::cout << "Connected to " << server_name
<< " server version " << server_version
<< " revision " << server_revision
<< "." << std::endl << std::endl;
<< " server version " << server_version
<< " revision " << server_revision
<< "." << std::endl << std::endl;
if (std::make_tuple(VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
< std::make_tuple(server_version_major, server_version_minor, server_version_patch))
{
std::cout << "ClickHouse client version is older than ClickHouse server. "
<< "It may lack support for new features."
<< std::endl << std::endl;
}
}
}
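Note: the check above relies on std::tuple's lexicographic operator<, which orders (major, minor, patch) tuples exactly like version numbers. A standalone sketch of the same comparison, with made-up version numbers for illustration:

#include <iostream>
#include <tuple>

int main()
{
    // Hypothetical version numbers, for illustration only.
    int client_major = 19, client_minor = 13, client_patch = 3;
    int server_major = 19, server_minor = 14, server_patch = 1;

    // std::tuple's operator< compares element by element, left to right,
    // so no hand-written major/minor/patch cascade is needed.
    if (std::make_tuple(client_major, client_minor, client_patch)
        < std::make_tuple(server_major, server_minor, server_patch))
        std::cout << "client is older than server\n";
}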

View File

@ -8,7 +8,6 @@
#include <vector>
#include <algorithm>
#include <ext/singleton.h>
#include <common/readline_use.h>
#include <Common/typeid_cast.h>
@ -25,7 +24,7 @@ namespace ErrorCodes
extern const int UNKNOWN_PACKET_FROM_SERVER;
}
class Suggest : public ext::singleton<Suggest>
class Suggest : private boost::noncopyable
{
private:
/// The vector will be filled with completion words from the server and sorted.
@ -161,6 +160,12 @@ private:
}
public:
static Suggest & instance()
{
static Suggest instance;
return instance;
}
/// Older server versions cannot execute the query above.
static constexpr int MIN_SERVER_REVISION = 54406;
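Note: across this commit, the ext::singleton<T> CRTP base is replaced by a function-local-static (Meyers) singleton, here and in the factory and resolver classes further below. A self-contained sketch of the pattern, with deleted copy operations standing in for the boost::noncopyable base used in the patch:

#include <iostream>

class Registry
{
public:
    static Registry & instance()
    {
        /// Since C++11, initialization of a function-local static is
        /// thread-safe: the first caller constructs it, concurrent callers block.
        static Registry ret;
        return ret;
    }

    Registry(const Registry &) = delete;
    Registry & operator=(const Registry &) = delete;

private:
    Registry() = default;
};

int main()
{
    Registry & a = Registry::instance();
    Registry & b = Registry::instance();
    std::cout << (&a == &b) << '\n'; /// prints 1: both calls return the same object
}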

View File

@ -1,5 +1,5 @@
set(CLICKHOUSE_COPIER_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/ClusterCopier.cpp)
set(CLICKHOUSE_COPIER_LINK PRIVATE clickhouse_common_zookeeper clickhouse_parsers clickhouse_functions clickhouse_table_functions clickhouse_aggregate_functions clickhouse_dictionaries string_utils PUBLIC daemon)
set(CLICKHOUSE_COPIER_LINK PRIVATE clickhouse_common_zookeeper clickhouse_parsers clickhouse_functions clickhouse_table_functions clickhouse_aggregate_functions clickhouse_dictionaries string_utils ${Poco_XML_LIBRARY} PUBLIC daemon)
set(CLICKHOUSE_COPIER_INCLUDE SYSTEM PRIVATE ${PCG_RANDOM_INCLUDE_DIR})
clickhouse_program_add(copier)

View File

@ -35,8 +35,7 @@ clickhouse_program_add_library(odbc-bridge)
# clickhouse-odbc-bridge is always a separate binary.
# Reason: it must not export symbols from SSL, mariadb-client, etc., so as not to break ABI compatibility with ODBC drivers.
# For this reason, we disable the -rdynamic linker flag. But we do it in a strange way:
SET(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--no-export-dynamic")
add_executable(clickhouse-odbc-bridge odbc-bridge.cpp)
set_target_properties(clickhouse-odbc-bridge PROPERTIES RUNTIME_OUTPUT_DIRECTORY ..)

View File

@ -1,3 +1,5 @@
#include <Common/config.h>
#if USE_POCO_NETSSL
#include "MySQLHandler.h"
#include <limits>
@ -301,3 +303,4 @@ void MySQLHandler::comQuery(ReadBuffer & payload)
}
}
#endif

View File

@ -1,4 +1,6 @@
#pragma once
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Poco/Net/TCPServerConnection.h>
#include <Poco/Net/SecureStreamSocket.h>
@ -56,3 +58,4 @@ private:
};
}
#endif

View File

@ -1,3 +1,5 @@
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Common/OpenSSLHelpers.h>
#include <Poco/Crypto/X509Certificate.h>
#include <Poco/Net/SSLManager.h>
@ -122,3 +124,4 @@ Poco::Net::TCPServerConnection * MySQLHandlerFactory::createConnection(const Poc
}
}
#endif

View File

@ -1,5 +1,8 @@
#pragma once
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Poco/Net/TCPServerConnectionFactory.h>
#include <atomic>
#include <openssl/rsa.h>
@ -37,3 +40,4 @@ public:
};
}
#endif

View File

@ -26,4 +26,10 @@ AggregateFunctionCombinatorPtr AggregateFunctionCombinatorFactory::tryFindSuffix
return {};
}
AggregateFunctionCombinatorFactory & AggregateFunctionCombinatorFactory::instance()
{
static AggregateFunctionCombinatorFactory ret;
return ret;
}
}

View File

@ -2,7 +2,6 @@
#include <AggregateFunctions/IAggregateFunctionCombinator.h>
#include <ext/singleton.h>
#include <string>
#include <unordered_map>
@ -13,13 +12,16 @@ namespace DB
/** Create aggregate function combinator by matching suffix in aggregate function name.
*/
class AggregateFunctionCombinatorFactory final: public ext::singleton<AggregateFunctionCombinatorFactory>
class AggregateFunctionCombinatorFactory final: private boost::noncopyable
{
private:
using Dict = std::unordered_map<std::string, AggregateFunctionCombinatorPtr>;
Dict dict;
public:
static AggregateFunctionCombinatorFactory & instance();
/// Not thread safe. You must register before using tryGet.
void registerCombinator(const AggregateFunctionCombinatorPtr & value);

View File

@ -160,4 +160,10 @@ bool AggregateFunctionFactory::isAggregateFunctionName(const String & name, int
return false;
}
AggregateFunctionFactory & AggregateFunctionFactory::instance()
{
static AggregateFunctionFactory ret;
return ret;
}
}

View File

@ -3,7 +3,6 @@
#include <AggregateFunctions/IAggregateFunction.h>
#include <Common/IFactoryWithAliases.h>
#include <ext/singleton.h>
#include <functional>
#include <memory>
@ -30,9 +29,12 @@ using AggregateFunctionCreator = std::function<AggregateFunctionPtr(const String
/** Creates an aggregate function by name.
*/
class AggregateFunctionFactory final : public ext::singleton<AggregateFunctionFactory>, public IFactoryWithAliases<AggregateFunctionCreator>
class AggregateFunctionFactory final : private boost::noncopyable, public IFactoryWithAliases<AggregateFunctionCreator>
{
public:
static AggregateFunctionFactory & instance();
/// Register a function by its name.
/// No locking, you must register all functions before usage of get.
void registerFunction(

View File

@ -88,10 +88,11 @@ public:
if (is_large)
{
toLarge();
UInt32 cardinality;
readBinary(cardinality, in);
db_roaring_bitmap_add_many(in, rb, cardinality);
std::string s;
readStringBinary(s,in);
rb = roaring_bitmap_portable_deserialize(s.c_str());
for (const auto & x : small) //merge from small
roaring_bitmap_add(rb, x.getValue());
}
else
small.read(in);
@ -103,9 +104,10 @@ public:
if (isLarge())
{
UInt32 cardinality = roaring_bitmap_get_cardinality(rb);
writePODBinary(cardinality, out);
db_ra_to_uint32_array(out, &rb->high_low_container);
uint32_t expectedsize = roaring_bitmap_portable_size_in_bytes(rb);
std::string s(expectedsize,0);
roaring_bitmap_portable_serialize(rb, const_cast<char*>(s.data()));
writeStringBinary(s,out);
}
else
small.write(out);
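Note: the bitmap state is now (de)serialized via CRoaring's portable format instead of the previous hand-rolled cardinality-prefixed encoding. A minimal round-trip sketch against the CRoaring C API; the header path and the inserted values are illustrative:

#include <roaring/roaring.h> /// CRoaring C API, header path as vendored (assumption)
#include <cassert>
#include <string>

int main()
{
    roaring_bitmap_t * rb = roaring_bitmap_create();
    roaring_bitmap_add(rb, 42);
    roaring_bitmap_add(rb, 100500);

    /// The portable format has a stable cross-implementation layout,
    /// so the bytes can be written out and read back anywhere.
    std::string s(roaring_bitmap_portable_size_in_bytes(rb), '\0');
    roaring_bitmap_portable_serialize(rb, const_cast<char *>(s.data()));

    roaring_bitmap_t * rb2 = roaring_bitmap_portable_deserialize(s.c_str());
    assert(roaring_bitmap_contains(rb2, 42));
    assert(roaring_bitmap_contains(rb2, 100500));

    roaring_bitmap_free(rb);
    roaring_bitmap_free(rb2);
}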

View File

@ -60,12 +60,6 @@ struct HashMethodOneNumber
/// Is used for default implementation in HashMethodBase.
FieldType getKeyHolder(size_t row, Arena &) const { return unalignedLoad<FieldType>(vec + row * sizeof(FieldType)); }
/// Get StringRef from value which can be inserted into column.
static StringRef getValueRef(const Value & value)
{
return StringRef(reinterpret_cast<const char *>(&value.first), sizeof(value.first));
}
};
@ -102,8 +96,6 @@ struct HashMethodString
}
}
static StringRef getValueRef(const Value & value) { return value.first; }
protected:
friend class columns_hashing_impl::HashMethodBase<Self, Value, Mapped, use_cache>;
};
@ -142,8 +134,6 @@ struct HashMethodFixedString
}
}
static StringRef getValueRef(const Value & value) { return value.first; }
protected:
friend class columns_hashing_impl::HashMethodBase<Self, Value, Mapped, use_cache>;
};
@ -560,11 +550,6 @@ struct HashMethodHashed
{
return hash128(row, key_columns.size(), key_columns);
}
static ALWAYS_INLINE StringRef getValueRef(const Value & value)
{
return StringRef(reinterpret_cast<const char *>(&value.first), sizeof(value.first));
}
};
}

View File

@ -223,5 +223,10 @@ void DNSResolver::addToNewHosts(const String & host)
DNSResolver::~DNSResolver() = default;
DNSResolver & DNSResolver::instance()
{
static DNSResolver ret;
return ret;
}
}

View File

@ -2,20 +2,21 @@
#include <Poco/Net/IPAddress.h>
#include <Poco/Net/SocketAddress.h>
#include <memory>
#include <ext/singleton.h>
#include <Core/Types.h>
#include <Core/Names.h>
#include <boost/noncopyable.hpp>
namespace DB
{
/// A singleton implementing DNS name resolution with an optional DNS cache.
/// The cache is updated asynchronously in a separate thread (see DNSCacheUpdater)
/// or can be updated manually via the drop() method.
class DNSResolver : public ext::singleton<DNSResolver>
class DNSResolver : private boost::noncopyable
{
public:
static DNSResolver & instance();
DNSResolver(const DNSResolver &) = delete;
@ -46,8 +47,6 @@ private:
DNSResolver();
friend class ext::singleton<DNSResolver>;
struct Impl;
std::unique_ptr<Impl> impl;

View File

@ -36,7 +36,6 @@ struct FixedClearableHashMapCell
}
Key key;
FixedClearableHashMapCell * ptr;
Key & getFirstMutable() { return key; }
const Key & getFirst() const { return key; }
Mapped & getSecond() { return ptr->mapped; }
const Mapped & getSecond() const { return *ptr->mapped; }

View File

@ -23,7 +23,6 @@ struct FixedClearableHashTableCell
struct CellExt
{
Key key;
value_type & getValueMutable() { return key; }
const value_type & getValue() const { return key; }
void update(Key && key_, FixedClearableHashTableCell *) { key = key_; }
};

View File

@ -39,7 +39,6 @@ struct FixedHashMapCell
Key key;
FixedHashMapCell * ptr;
Key & getFirstMutable() { return key; }
const Key & getFirst() const { return key; }
Mapped & getSecond() { return ptr->mapped; }
const Mapped & getSecond() const { return ptr->mapped; }
@ -53,12 +52,53 @@ class FixedHashMap : public FixedHashTable<Key, FixedHashMapCell<Key, Mapped>, A
{
public:
using Base = FixedHashTable<Key, FixedHashMapCell<Key, Mapped>, Allocator>;
using Self = FixedHashMap;
using key_type = Key;
using mapped_type = Mapped;
using value_type = typename Base::cell_type::value_type;
using Cell = typename Base::cell_type;
using value_type = typename Cell::value_type;
using Base::Base;
template <typename Func>
void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
{
for (auto it = this->begin(), end = this->end(); it != end; ++it)
{
decltype(it) res_it;
bool inserted;
that.emplace(it->getFirst(), res_it, inserted, it.getHash());
func(res_it->getSecond(), it->getSecond(), inserted);
}
}
template <typename Func>
void ALWAYS_INLINE mergeToViaFind(Self & that, Func && func)
{
for (auto it = this->begin(), end = this->end(); it != end; ++it)
{
decltype(it) res_it = that.find(it->getFirst(), it.getHash());
if (res_it == that.end())
func(it->getSecond(), it->getSecond(), false);
else
func(res_it->getSecond(), it->getSecond(), true);
}
}
template <typename Func>
void forEachValue(Func && func)
{
for (auto & v : *this)
func(v.getFirst(), v.getSecond());
}
template <typename Func>
void forEachMapped(Func && func)
{
for (auto & v : *this)
func(v.getSecond());
}
mapped_type & ALWAYS_INLINE operator[](Key x)
{
typename Base::iterator it;

View File

@ -28,7 +28,6 @@ struct FixedHashTableCell
{
Key key;
value_type & getValueMutable() { return key; }
const value_type & getValue() const { return key; }
void update(Key && key_, FixedHashTableCell *) { key = key_; }
};

View File

@ -49,12 +49,10 @@ struct HashMapCell
HashMapCell(const Key & key_, const State &) : value(key_, NoInitTag()) {}
HashMapCell(const value_type & value_, const State &) : value(value_) {}
Key & getFirstMutable() { return value.first; }
const Key & getFirst() const { return value.first; }
Mapped & getSecond() { return value.second; }
const Mapped & getSecond() const { return value.second; }
value_type & getValueMutable() { return value; }
const value_type & getValue() const { return value; }
static const Key & getKey(const value_type & value) { return value.first; }
@ -137,12 +135,65 @@ template <
class HashMapTable : public HashTable<Key, Cell, Hash, Grower, Allocator>
{
public:
using Self = HashMapTable;
using key_type = Key;
using mapped_type = typename Cell::Mapped;
using value_type = typename Cell::value_type;
using HashTable<Key, Cell, Hash, Grower, Allocator>::HashTable;
/// Merge every cell's value of the current map into the destination map via emplace.
/// Func should have the signature void(Mapped & dst, Mapped & src, bool emplaced).
/// Each filled cell in the current map invokes func once. If the destination map
/// doesn't have a key equal to the given cell's, a new cell gets emplaced into it,
/// and func is invoked with the third argument emplaced set to true. Otherwise
/// emplaced is set to false.
template <typename Func>
void ALWAYS_INLINE mergeToViaEmplace(Self & that, Func && func)
{
for (auto it = this->begin(), end = this->end(); it != end; ++it)
{
decltype(it) res_it;
bool inserted;
that.emplace(it->getFirst(), res_it, inserted, it.getHash());
func(res_it->getSecond(), it->getSecond(), inserted);
}
}
/// Merge every cell's value of the current map into the destination map via find.
/// Func should have the signature void(Mapped & dst, Mapped & src, bool exist).
/// Each filled cell in the current map invokes func once. If the destination map
/// doesn't have a key equal to the given cell's, func is invoked with the third
/// argument exist set to false. Otherwise exist is set to true.
template <typename Func>
void ALWAYS_INLINE mergeToViaFind(Self & that, Func && func)
{
for (auto it = this->begin(), end = this->end(); it != end; ++it)
{
decltype(it) res_it = that.find(it->getFirst(), it.getHash());
if (res_it == that.end())
func(it->getSecond(), it->getSecond(), false);
else
func(res_it->getSecond(), it->getSecond(), true);
}
}
/// Call func(const Key &, Mapped &) for each hash map element.
template <typename Func>
void forEachValue(Func && func)
{
for (auto & v : *this)
func(v.getFirst(), v.getSecond());
}
/// Call func(Mapped &) for each hash map element.
template <typename Func>
void forEachMapped(Func && func)
{
for (auto & v : *this)
func(v.getSecond());
}
mapped_type & ALWAYS_INLINE operator[](Key x)
{
typename HashMapTable::iterator it;

View File

@ -98,7 +98,6 @@ struct HashTableCell
HashTableCell(const Key & key_, const State &) : key(key_) {}
/// Get what the value_type of the container will be.
value_type & getValueMutable() { return key; }
const value_type & getValue() const { return key; }
/// Get the key.

View File

@ -22,6 +22,13 @@ public:
using TwoLevelHashTable<Key, Cell, Hash, Grower, Allocator, ImplTable<Key, Cell, Hash, Grower, Allocator>>::TwoLevelHashTable;
template <typename Func>
void ALWAYS_INLINE forEachMapped(Func && func)
{
for (auto i = 0u; i < this->NUM_BUCKETS; ++i)
this->impls[i].forEachMapped(func);
}
mapped_type & ALWAYS_INLINE operator[](Key x)
{
typename TwoLevelHashMapTable::iterator it;

View File

@ -1,3 +1,5 @@
#include <Common/config.h>
#if USE_POCO_NETSSL
#include "OpenSSLHelpers.h"
#include <ext/scope_guard.h>
#include <openssl/err.h>
@ -16,3 +18,4 @@ String getOpenSSLErrors()
}
}
#endif

View File

@ -1,4 +1,6 @@
#pragma once
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Core/Types.h>
@ -10,3 +12,4 @@ namespace DB
String getOpenSSLErrors();
}
#endif

View File

@ -4,8 +4,6 @@
#include <Common/CurrentMetrics.h>
#include <Common/ProfileEvents.h>
#include <cassert>
namespace ProfileEvents
{
@ -35,22 +33,48 @@ namespace ErrorCodes
}
/** A single-use object that represents a lock's ownership.
* For the purpose of exception-safety guarantees, LockHolder is used in two steps:
* 1. Create an instance (allocating all the memory needed)
* 2. Associate the instance with the lock (attach to the lock and its locking request group)
*/
class RWLockImpl::LockHolderImpl
{
bool bound{false};
Type lock_type;
String query_id;
CurrentMetrics::Increment active_client_increment;
RWLock parent;
GroupsContainer::iterator it_group;
ClientsContainer::iterator it_client;
QueryIdToHolder::key_type query_id;
CurrentMetrics::Increment active_client_increment;
LockHolderImpl(RWLock && parent, GroupsContainer::iterator it_group, ClientsContainer::iterator it_client);
public:
LockHolderImpl(const LockHolderImpl & other) = delete;
LockHolderImpl& operator=(const LockHolderImpl & other) = delete;
/// Implicit memory allocation for query_id is done here
LockHolderImpl(const String & query_id_, Type type)
: lock_type{type}, query_id{query_id_},
active_client_increment{
type == Type::Read ? CurrentMetrics::RWLockActiveReaders : CurrentMetrics::RWLockActiveWriters}
{
}
~LockHolderImpl();
private:
/// A separate method which binds the lock holder to the owned lock
/// N.B. It is very important that this method produces no allocations
bool bind_with(RWLock && parent_, GroupsContainer::iterator it_group_) noexcept
{
if (bound)
return false;
it_group = it_group_;
parent = std::move(parent_);
++it_group->refererrs;
bound = true;
return true;
}
friend class RWLockImpl;
};
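Note: a generic sketch of the allocate-first, bind-nothrow idiom described in the comment above; the types and names are illustrative stand-ins, not the real RWLock internals:

#include <memory>
#include <string>
#include <utility>

struct LockState {}; /// illustrative stand-in for the lock's shared bookkeeping

class Holder
{
public:
    /// Step 1: may throw (the string copy allocates), but no shared state
    /// has been touched yet, so a failure here leaves the lock untouched.
    explicit Holder(std::string query_id_) : query_id(std::move(query_id_)) {}

    /// Step 2: associate with the lock; must not throw, because it runs
    /// after the lock's bookkeeping has already been modified.
    bool bind(std::shared_ptr<LockState> parent_) noexcept
    {
        if (bound)
            return false;
        parent = std::move(parent_);
        bound = true;
        return true;
    }

private:
    bool bound = false;
    std::string query_id;
    std::shared_ptr<LockState> parent;
};

int main()
{
    auto state = std::make_shared<LockState>();
    Holder holder("query-1"); /// all allocations happen up front
    holder.bind(state);       /// nothrow association, done under the lock's mutex
}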
@ -62,29 +86,33 @@ namespace
class QueryLockInfo
{
private:
std::mutex mutex;
mutable std::mutex mutex;
std::map<std::string, size_t> queries;
public:
void add(const String & query_id)
{
std::lock_guard lock(mutex);
++queries[query_id];
const auto res = queries.emplace(query_id, 1); // may throw
if (!res.second)
++res.first->second;
}
void remove(const String & query_id)
void remove(const String & query_id) noexcept
{
std::lock_guard lock(mutex);
auto it = queries.find(query_id);
assert(it != queries.end());
if (--it->second == 0)
queries.erase(it);
const auto query_it = queries.find(query_id);
if (query_it != queries.cend() && --query_it->second == 0)
queries.erase(query_it);
}
void check(const String & query_id)
void check(const String & query_id) const
{
std::lock_guard lock(mutex);
if (queries.count(query_id))
if (queries.find(query_id) != queries.cend())
throw Exception("Possible deadlock avoided. Client should retry.", ErrorCodes::DEADLOCK_AVOIDED);
}
};
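Note: QueryLockInfo's add/remove pair is a small refcounting idiom over std::map. Extracted as a standalone sketch, with the mutex omitted for brevity:

#include <cassert>
#include <map>
#include <string>

std::map<std::string, size_t> counts;

/// emplace(key, 1) either inserts a fresh counter (res.second == true)
/// or reports the existing entry, whose counter we bump.
void add(const std::string & key)
{
    const auto res = counts.emplace(key, 1); /// may throw, but only before any change
    if (!res.second)
        ++res.first->second;
}

/// Decrement and erase on zero; find() and erase-by-iterator do not throw here.
void remove(const std::string & key) noexcept
{
    const auto it = counts.find(key);
    if (it != counts.cend() && --it->second == 0)
        counts.erase(it);
}

int main()
{
    add("query-1");
    add("query-1");
    remove("query-1");
    assert(counts.at("query-1") == 1);
    remove("query-1");
    assert(counts.find("query-1") == counts.end());
}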
@ -93,8 +121,16 @@ namespace
}
/** To guarantee that we do not get any piece of our data corrupted:
* 1. Perform all actions that involve allocations before changing the lock's internal state
* 2. Roll back any changes that would make the state inconsistent
*
* Note: "SM" in the comments below stands for STATE MODIFICATION
*/
RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String & query_id)
{
const bool request_has_query_id = query_id != NO_QUERY;
Stopwatch watch(CLOCK_MONOTONIC_COARSE);
CurrentMetrics::Increment waiting_client_increment((type == Read) ? CurrentMetrics::RWLockWaitingReaders
: CurrentMetrics::RWLockWaitingWriters);
@ -106,29 +142,39 @@ RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String &
: ProfileEvents::RWLockWritersWaitMilliseconds, watch.elapsedMilliseconds());
};
GroupsContainer::iterator it_group;
ClientsContainer::iterator it_client;
/// This object is declared above the unique_lock because its destructor may take the mutex,
/// so it must be destroyed only after the unique_lock has been released.
LockHolder res;
auto lock_holder = std::make_shared<LockHolderImpl>(query_id, type);
std::unique_lock lock(mutex);
/// Check if the same query is acquiring previously acquired lock
if (query_id != RWLockImpl::NO_QUERY)
/// The FastPath:
/// Check if the same query_id already holds the required lock in which case we can proceed without waiting
if (request_has_query_id)
{
auto it_query = query_id_to_holder.find(query_id);
if (it_query != query_id_to_holder.end())
res = it_query->second.lock();
}
const auto it_query = owner_queries.find(query_id);
if (it_query != owner_queries.end())
{
const auto current_owner_group = queue.begin();
if (res)
{
/// XXX: it means we can't upgrade lock from read to write - with proper waiting!
if (type != Read || res->it_group->type != Read)
throw Exception("Attempt to acquire exclusive lock recursively", ErrorCodes::LOGICAL_ERROR);
else
return res;
/// XXX: it means we can't upgrade lock from read to write!
if (type == Write)
throw Exception(
"RWLockImpl::getLock(): Cannot acquire exclusive lock while RWLock is already locked",
ErrorCodes::LOGICAL_ERROR);
if (current_owner_group->type == Write)
throw Exception(
"RWLockImpl::getLock(): RWLock is already locked in exclusive mode",
ErrorCodes::LOGICAL_ERROR);
/// N.B. Type is Read here, query_id is not empty and it_query is a valid iterator
all_read_locks.add(query_id); /// SM1: may throw on insertion (nothing to roll back)
++it_query->second; /// SM2: nothrow
lock_holder->bind_with(shared_from_this(), current_owner_group); /// SM3: nothrow
finalize_metrics();
return lock_holder;
}
}
/** If the query already has any active read lock and tries to acquire another read lock
@ -148,86 +194,106 @@ RWLockImpl::LockHolder RWLockImpl::getLock(RWLockImpl::Type type, const String &
if (type == Type::Write || queue.empty() || queue.back().type == Type::Write)
{
if (type == Type::Read && !queue.empty() && queue.back().type == Type::Write && query_id != RWLockImpl::NO_QUERY)
if (type == Type::Read && request_has_query_id && !queue.empty())
all_read_locks.check(query_id);
/// Create new group of clients
it_group = queue.emplace(queue.end(), type);
/// Create a new group of locking requests
queue.emplace_back(type); /// SM1: may throw (nothing to roll back)
}
else
{
/// Will append myself to last group
it_group = std::prev(queue.end());
else if (request_has_query_id && queue.size() > 1)
all_read_locks.check(query_id);
if (it_group != queue.begin() && query_id != RWLockImpl::NO_QUERY)
all_read_locks.check(query_id);
}
GroupsContainer::iterator it_group = std::prev(queue.end());
/// Append myself to the end of chosen group
auto & clients = it_group->clients;
try
{
it_client = clients.emplace(clients.end(), type);
}
catch (...)
{
/// Remove group if it was the first client in the group and an error occurred
if (clients.empty())
queue.erase(it_group);
throw;
}
res.reset(new LockHolderImpl(shared_from_this(), it_group, it_client));
/// We need to reference the associated group before waiting to guarantee
/// that this group does not get deleted prematurely
++it_group->refererrs;
/// Wait a notification until we will be the only in the group.
it_group->cv.wait(lock, [&] () { return it_group == queue.begin(); });
/// Insert myself (weak_ptr to the holder) to queries set to implement recursive lock
if (query_id != RWLockImpl::NO_QUERY)
{
query_id_to_holder.emplace(query_id, res);
--it_group->referrers;
if (type == Type::Read)
all_read_locks.add(query_id);
if (request_has_query_id)
{
try
{
if (type == Type::Read)
all_read_locks.add(query_id); /// SM2: may throw on insertion
/// and is safe to roll back unconditionally
const auto emplace_res =
owner_queries.emplace(query_id, 1); /// SM3: may throw on insertion
if (!emplace_res.second)
++emplace_res.first->second; /// SM4: nothrow
}
catch (...)
{
/// Methods std::list<>::emplace_back() and std::unordered_map<>::emplace() provide strong exception safety
/// We only need to roll back the changes to these objects: all_read_locks and the locking queue
if (type == Type::Read)
all_read_locks.remove(query_id); /// Rollback(SM2): nothrow
if (it_group->referrers == 0)
{
const auto next = queue.erase(it_group); /// Rollback(SM1): nothrow
if (next != queue.end())
next->cv.notify_all();
}
throw;
}
}
res->query_id = query_id;
lock_holder->bind_with(shared_from_this(), it_group); /// SM: nothrow
finalize_metrics();
return res;
return lock_holder;
}
/** The sequence points of acquiring lock's ownership by an instance of LockHolderImpl:
* 1. all_read_locks is updated
* 2. owner_queries is updated
* 3. request group is updated by LockHolderImpl which in turn becomes "bound"
*
* If the instance has been "bound" by the time the destructor of LockHolderImpl is called,
* it is guaranteed that all three steps have been executed successfully and the resulting state is consistent.
* With the mutex locked, the order of the steps that restore the lock's state can be arbitrary
*
* We do not employ try-catch: if something bad happens, there is nothing we can do =(
*/
RWLockImpl::LockHolderImpl::~LockHolderImpl()
{
if (!bound || parent == nullptr)
return;
std::lock_guard lock(parent->mutex);
/// Remove weak_ptrs to the holder, since there are no owners of the current lock
parent->query_id_to_holder.erase(query_id);
/// The associated group must exist (and be the beginning of the queue?)
if (parent->queue.empty() || it_group != parent->queue.begin())
return;
if (*it_client == RWLockImpl::Read && query_id != RWLockImpl::NO_QUERY)
all_read_locks.remove(query_id);
/// Removes myself from client list of our group
it_group->clients.erase(it_client);
/// Remove the group if we were the last client and notify the next group
if (it_group->clients.empty())
/// If query_id is not empty it must be listed in parent->owner_queries
if (query_id != RWLockImpl::NO_QUERY)
{
auto & parent_queue = parent->queue;
parent_queue.erase(it_group);
const auto owner_it = parent->owner_queries.find(query_id);
if (owner_it != parent->owner_queries.end())
{
if (--owner_it->second == 0) /// SM: nothrow
parent->owner_queries.erase(owner_it); /// SM: nothrow
if (!parent_queue.empty())
parent_queue.front().cv.notify_all();
if (lock_type == RWLockImpl::Read)
all_read_locks.remove(query_id); /// SM: nothrow
}
}
/// If we are the last remaining referrer, remove the group and notify the next group
if (--it_group->referrers == 0) /// SM: nothrow
{
const auto next = parent->queue.erase(it_group); /// SM: nothrow
if (next != parent->queue.end())
next->cv.notify_all();
}
}
RWLockImpl::LockHolderImpl::LockHolderImpl(RWLock && parent_, RWLockImpl::GroupsContainer::iterator it_group_,
RWLockImpl::ClientsContainer::iterator it_client_)
: parent{std::move(parent_)}, it_group{it_group_}, it_client{it_client_},
active_client_increment{(*it_client == RWLockImpl::Read) ? CurrentMetrics::RWLockActiveReaders
: CurrentMetrics::RWLockActiveWriters}
{
}
}
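A minimal usage sketch of the fast path above, assuming `RWLockImpl::create()` is the factory for `RWLock` and using the `Read`/`Write` members and header path from this file (both the factory name and the include path are assumptions, not part of this diff):

```cpp
#include <Common/RWLock.h>  // header path assumed

using namespace DB;

void example()
{
    RWLock lock = RWLockImpl::create();                          // assumed factory

    auto holder1 = lock->getLock(RWLockImpl::Read, "query-1");   // slow path: a new Read group is enqueued
    auto holder2 = lock->getLock(RWLockImpl::Read, "query-1");   // fast path: only the owner_queries counter grows

    /// An upgrade attempt for the same query_id throws LOGICAL_ERROR:
    /// lock->getLock(RWLockImpl::Write, "query-1");

    holder2.reset();  // decrements the counter in owner_queries
    holder1.reset();  // last reference: the group is erased, the next group is notified
}
```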
View File
@ -8,6 +8,7 @@
#include <condition_variable>
#include <map>
#include <string>
#include <unordered_map>
namespace DB
@ -53,25 +54,24 @@ private:
struct Group;
using GroupsContainer = std::list<Group>;
using ClientsContainer = std::list<Type>;
using QueryIdToHolder = std::map<String, std::weak_ptr<LockHolderImpl>>;
using OwnerQueryIds = std::unordered_map<String, size_t>;
/// Group of clients that should be executed concurrently
/// i.e. a group could contain several readers, but only one writer
/// Group of locking requests that should be granted concurrently
/// i.e. a group can contain several readers, but only one writer
struct Group
{
// FIXME: there is only redundant |type| information inside |clients|.
const Type type;
ClientsContainer clients;
size_t referrers;
std::condition_variable cv; /// all clients of the group wait group condvar
std::condition_variable cv; /// all locking requests of the group wait on this condvar
explicit Group(Type type_) : type{type_} {}
explicit Group(Type type_) : type{type_}, referrers{0} {}
};
mutable std::mutex mutex;
GroupsContainer queue;
QueryIdToHolder query_id_to_holder;
OwnerQueryIds owner_queries;
mutable std::mutex mutex;
};
View File
@ -361,6 +361,12 @@ const SymbolIndex::Object * SymbolIndex::findObject(const void * address) const
return find(address, data.objects);
}
SymbolIndex & SymbolIndex::instance()
{
static SymbolIndex instance;
return instance;
}
}
#endif
View File
@ -4,8 +4,8 @@
#include <vector>
#include <string>
#include <ext/singleton.h>
#include <Common/Elf.h>
#include <boost/noncopyable.hpp>
namespace DB
@ -15,13 +15,14 @@ namespace DB
* Used as a replacement for the "dladdr" function, which is extremely slow.
* It works better than "dladdr" because it can also search private symbols, which do not participate in shared linking.
*/
class SymbolIndex : public ext::singleton<SymbolIndex>
class SymbolIndex : private boost::noncopyable
{
protected:
friend class ext::singleton<SymbolIndex>;
SymbolIndex() { update(); }
public:
static SymbolIndex & instance();
struct Symbol
{
const void * address_begin;
View File
@ -287,3 +287,8 @@ ThreadPool::Job createExceptionHandledJob(ThreadPool::Job job, ExceptionHandler
};
}
GlobalThreadPool & GlobalThreadPool::instance()
{
static GlobalThreadPool ret;
return ret;
}
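The same replacement recurs throughout this commit: inheritance from `ext::singleton<T>` gives way to a `static T & instance()` with a function-local static. A self-contained sketch of the pattern (the class name is hypothetical):

```cpp
#include <boost/noncopyable.hpp>
#include <iostream>

// "Meyers singleton": the function-local static is initialized exactly once,
// and C++11 guarantees that this initialization is thread-safe.
class SomeFactory : private boost::noncopyable
{
public:
    static SomeFactory & instance()
    {
        static SomeFactory ret;  // constructed on first call, destroyed at exit
        return ret;
    }

private:
    SomeFactory() = default;
};

int main()
{
    // Both calls return a reference to the same object.
    std::cout << (&SomeFactory::instance() == &SomeFactory::instance()) << '\n';  // prints 1
}
```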
View File
@ -8,7 +8,6 @@
#include <queue>
#include <list>
#include <optional>
#include <ext/singleton.h>
#include <Poco/Event.h>
#include <Common/ThreadStatus.h>
@ -121,10 +120,11 @@ using FreeThreadPool = ThreadPoolImpl<std::thread>;
* - address sanitizer and thread sanitizer will not fail due to global limit on number of created threads.
* - program will work faster in gdb;
*/
class GlobalThreadPool : public FreeThreadPool, public ext::singleton<GlobalThreadPool>
class GlobalThreadPool : public FreeThreadPool, private boost::noncopyable
{
public:
GlobalThreadPool() : FreeThreadPool(10000, 1000, 10000) {}
static GlobalThreadPool & instance();
};
View File
@ -11,6 +11,7 @@
#endif
#include <gtest/gtest.h>
#include <chrono>
namespace DB
View File
@ -155,4 +155,10 @@ CompressionCodecFactory::CompressionCodecFactory()
registerCodecGorilla(*this);
}
CompressionCodecFactory & CompressionCodecFactory::instance()
{
static CompressionCodecFactory ret;
return ret;
}
}
View File
@ -6,8 +6,6 @@
#include <Parsers/IAST_fwd.h>
#include <Common/IFactoryWithAliases.h>
#include <ext/singleton.h>
#include <functional>
#include <memory>
#include <optional>
@ -24,7 +22,7 @@ using CodecNameWithLevel = std::pair<String, std::optional<int>>;
/** Creates a codec object by name of compression algorithm family and parameters.
*/
class CompressionCodecFactory final : public ext::singleton<CompressionCodecFactory>
class CompressionCodecFactory final : private boost::noncopyable
{
protected:
using Creator = std::function<CompressionCodecPtr(const ASTPtr & parameters)>;
@ -34,6 +32,8 @@ protected:
using CompressionCodecsCodeDictionary = std::unordered_map<UInt8, CreatorWithType>;
public:
static CompressionCodecFactory & instance();
/// Return default codec (currently LZ4)
CompressionCodecPtr getDefaultCodec() const;
@ -65,8 +65,6 @@ private:
CompressionCodecPtr default_codec;
CompressionCodecFactory();
friend class ext::singleton<CompressionCodecFactory>;
};
}
View File
@ -1,3 +1,5 @@
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <IO/WriteBuffer.h>
#include <IO/ReadBufferFromString.h>
#include <IO/WriteBufferFromString.h>
@ -100,3 +102,4 @@ size_t getLengthEncodedStringSize(const String & s)
}
}
#endif
View File
@ -1,4 +1,6 @@
#pragma once
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <ext/scope_guard.h>
#include <openssl/pem.h>
@ -1075,3 +1077,4 @@ private:
}
}
#endif
View File
@ -6,8 +6,8 @@
#include <common/StringRef.h>
#include <Common/SettingsChanges.h>
#include <Core/Types.h>
#include <ext/singleton.h>
#include <unordered_map>
#include <boost/noncopyable.hpp>
namespace DB
@ -329,14 +329,10 @@ private:
bool isChanged(const Derived & collection) const { return is_changed(collection); }
};
class MemberInfos
class MemberInfos : private boost::noncopyable
{
public:
static const MemberInfos & instance()
{
static const MemberInfos single_instance;
return single_instance;
}
static const MemberInfos & instance();
size_t size() const { return infos.size(); }
const MemberInfo & operator[](size_t index) const { return infos[index]; }
@ -630,6 +626,12 @@ public:
LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_COLLECTION_DEFINE_FUNCTIONS_HELPER_) \
}; \
LIST_OF_SETTINGS_MACRO(IMPLEMENT_SETTINGS_COLLECTION_ADD_MEMBER_INFO_HELPER_) \
} \
template <> \
const SettingsCollection<DERIVED_CLASS_NAME>::MemberInfos & SettingsCollection<DERIVED_CLASS_NAME>::MemberInfos::instance() \
{ \
static const SettingsCollection<DERIVED_CLASS_NAME>::MemberInfos single_instance; \
return single_instance; \
}
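The macro above defines `instance()` once per settings class via an explicit template specialization. A reduced, self-contained sketch of the same technique (names are hypothetical, and the nested-class structure of SettingsCollection is simplified away):

```cpp
#include <iostream>

// The member function is only declared in the class template...
template <typename Derived>
struct MemberInfos
{
    static const MemberInfos & instance();
};

struct MySettings {};  // hypothetical settings class

// ...and defined once per explicit specialization, so every settings class
// gets its own function-local static, just like the macro expansion above.
template <>
const MemberInfos<MySettings> & MemberInfos<MySettings>::instance()
{
    static const MemberInfos<MySettings> single_instance;
    return single_instance;
}

int main()
{
    std::cout << (&MemberInfos<MySettings>::instance()
                  == &MemberInfos<MySettings>::instance()) << '\n';  // prints 1
}
```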
View File
@ -28,8 +28,6 @@ void copyDataImpl(IBlockInputStream & from, IBlockOutputStream & to, TCancelCall
break;
to.write(block);
if (!block.rows())
to.flush();
progress(block);
}
View File
@ -125,5 +125,4 @@ public:
using DataTypeEnum8 = DataTypeEnum<Int8>;
using DataTypeEnum16 = DataTypeEnum<Int16>;
}
View File
@ -202,4 +202,10 @@ DataTypeFactory::DataTypeFactory()
DataTypeFactory::~DataTypeFactory()
{}
DataTypeFactory & DataTypeFactory::instance()
{
static DataTypeFactory ret;
return ret;
}
}
View File
@ -4,7 +4,6 @@
#include <Parsers/IAST_fwd.h>
#include <Common/IFactoryWithAliases.h>
#include <ext/singleton.h>
#include <functional>
#include <memory>
@ -20,7 +19,7 @@ using DataTypePtr = std::shared_ptr<const IDataType>;
/** Creates a data type by name of data type family and parameters.
*/
class DataTypeFactory final : public ext::singleton<DataTypeFactory>, public IFactoryWithAliases<std::function<DataTypePtr(const ASTPtr & parameters)>>
class DataTypeFactory final : private boost::noncopyable, public IFactoryWithAliases<std::function<DataTypePtr(const ASTPtr & parameters)>>
{
private:
using SimpleCreator = std::function<DataTypePtr()>;
@ -29,6 +28,8 @@ private:
using SimpleCreatorWithCustom = std::function<std::pair<DataTypePtr,DataTypeCustomDescPtr>()>;
public:
static DataTypeFactory & instance();
DataTypePtr get(const String & full_name) const;
DataTypePtr get(const String & family_name, const ASTPtr & parameters) const;
DataTypePtr get(const ASTPtr & ast) const;
@ -62,8 +63,6 @@ private:
const DataTypesDictionary & getCaseInsensitiveCreatorMap() const override { return case_insensitive_data_types; }
String getFactoryName() const override { return "DataTypeFactory"; }
friend class ext::singleton<DataTypeFactory>;
};
}
View File
@ -47,4 +47,10 @@ DictionaryPtr DictionaryFactory::create(
throw Exception{name + ": unknown dictionary layout type: " + layout_type, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG};
}
DictionaryFactory & DictionaryFactory::instance()
{
static DictionaryFactory ret;
return ret;
}
}
View File
@ -1,6 +1,5 @@
#pragma once
#include <ext/singleton.h>
#include "IDictionary.h"
@ -22,9 +21,12 @@ namespace DB
class Context;
class DictionaryFactory : public ext::singleton<DictionaryFactory>
class DictionaryFactory : private boost::noncopyable
{
public:
static DictionaryFactory & instance();
DictionaryPtr create(const std::string & name, const Poco::Util::AbstractConfiguration & config, const std::string & config_prefix, Context & context) const;
using Creator = std::function<DictionaryPtr(
View File
@ -101,4 +101,10 @@ DictionarySourcePtr DictionarySourceFactory::create(
throw Exception{name + ": unknown dictionary source type: " + source_type, ErrorCodes::UNKNOWN_ELEMENT_IN_CONFIG};
}
DictionarySourceFactory & DictionarySourceFactory::instance()
{
static DictionarySourceFactory instance;
return instance;
}
}
View File
@ -4,7 +4,6 @@
#include <Core/Block.h>
#include <unordered_map>
#include <ext/singleton.h>
namespace Poco
{
@ -22,9 +21,11 @@ class Context;
struct DictionaryStructure;
/// creates IDictionarySource instance from config and DictionaryStructure
class DictionarySourceFactory : public ext::singleton<DictionarySourceFactory>
class DictionarySourceFactory : private boost::noncopyable
{
public:
static DictionarySourceFactory & instance();
using Creator = std::function<DictionarySourcePtr(
const DictionaryStructure & dict_struct,
const Poco::Util::AbstractConfiguration & config,
View File
@ -4,7 +4,6 @@
#include <Poco/Exception.h>
#include <Poco/Util/Application.h>
#include <common/logger_useful.h>
#include <ext/singleton.h>
#include "GeodataProviders/IHierarchiesProvider.h"
View File
@ -1,3 +1,4 @@
#include <Common/config.h>
#include <Common/Exception.h>
#include <Interpreters/Context.h>
#include <Core/Settings.h>
@ -312,7 +313,15 @@ FormatFactory::FormatFactory()
registerOutputFormatProcessorODBCDriver(*this);
registerOutputFormatProcessorODBCDriver2(*this);
registerOutputFormatProcessorNull(*this);
#if USE_POCO_NETSSL
registerOutputFormatProcessorMySQLWrite(*this);
#endif
}
FormatFactory & FormatFactory::instance()
{
static FormatFactory ret;
return ret;
}
}
View File
@ -2,11 +2,11 @@
#include <Core/Types.h>
#include <DataStreams/IBlockStream_fwd.h>
#include <ext/singleton.h>
#include <functional>
#include <memory>
#include <unordered_map>
#include <boost/noncopyable.hpp>
namespace DB
@ -34,7 +34,7 @@ using OutputFormatPtr = std::shared_ptr<IOutputFormat>;
/** Allows creating an IBlockInputStream or IBlockOutputStream by the name of the format.
* Note: format and compression are independent things.
*/
class FormatFactory final : public ext::singleton<FormatFactory>
class FormatFactory final : private boost::noncopyable
{
public:
/// This callback allows to perform some additional actions after reading a single row.
@ -87,6 +87,9 @@ private:
using FormatsDictionary = std::unordered_map<String, Creators>;
public:
static FormatFactory & instance();
BlockInputStreamPtr getInput(
const String & name,
ReadBuffer & buf,
@ -128,7 +131,6 @@ private:
FormatsDictionary dict;
FormatFactory();
friend class ext::singleton<FormatFactory>;
const Creators & getCreators(const String & name) const;
};
View File
@ -131,8 +131,6 @@ Block MySQLBlockInputStream::readImpl()
row = result.fetch();
}
if (auto_close)
entry.disconnect();
return description.sample_block.cloneWithColumns(std::move(columns));
}
View File
@ -15,6 +15,11 @@ namespace ErrorCodes
extern const int CANNOT_PARSE_PROTOBUF_SCHEMA;
}
ProtobufSchemas & ProtobufSchemas::instance()
{
static ProtobufSchemas instance;
return instance;
}
class ProtobufSchemas::ImporterWithSourceTree : public google::protobuf::compiler::MultiFileErrorCollector
{
View File
@ -6,7 +6,7 @@
#include <memory>
#include <unordered_map>
#include <Core/Types.h>
#include <ext/singleton.h>
#include <boost/noncopyable.hpp>
namespace google
@ -24,9 +24,11 @@ class FormatSchemaInfo;
/** Keeps parsed google protobuf schemas parsed from files.
* This class is used to handle the "Protobuf" input/output formats.
*/
class ProtobufSchemas : public ext::singleton<ProtobufSchemas>
class ProtobufSchemas : private boost::noncopyable
{
public:
static ProtobufSchemas & instance();
ProtobufSchemas();
~ProtobufSchemas();
View File
@ -73,4 +73,10 @@ FunctionBuilderPtr FunctionFactory::tryGet(
return {};
}
FunctionFactory & FunctionFactory::instance()
{
static FunctionFactory ret;
return ret;
}
View File

@ -3,7 +3,6 @@
#include <Functions/IFunction.h>
#include <Common/IFactoryWithAliases.h>
#include <ext/singleton.h>
#include <functional>
#include <memory>
@ -21,10 +20,12 @@ class Context;
* Function could use for initialization (take ownership of shared_ptr, for example)
* some dictionaries from Context.
*/
class FunctionFactory : public ext::singleton<FunctionFactory>, public IFactoryWithAliases<std::function<FunctionBuilderPtr(const Context &)>>
class FunctionFactory : private boost::noncopyable, public IFactoryWithAliases<std::function<FunctionBuilderPtr(const Context &)>>
{
public:
static FunctionFactory & instance();
template <typename Function>
void registerFunction(CaseSensitiveness case_sensitiveness = CaseSensitive)
{
View File
@ -15,7 +15,6 @@ if(USE_HYPERSCAN)
target_include_directories(clickhouse_functions_url SYSTEM PRIVATE ${HYPERSCAN_INCLUDE_DIR})
endif()
include(${ClickHouse_SOURCE_DIR}/cmake/find_gperf.cmake)
if (USE_GPERF)
# Only for regenerate
add_custom_target(generate-tldlookup-gperf ./tldLookup.sh
View File
@ -158,6 +158,12 @@ std::future<AIOContextPool::BytesRead> AIOContextPool::post(struct iocb & iocb)
return promises[request_id].get_future();
}
AIOContextPool & AIOContextPool::instance()
{
static AIOContextPool instance;
return instance;
}
}
#endif
View File
@ -2,7 +2,6 @@
#if defined(__linux__) || defined(__FreeBSD__)
#include <ext/singleton.h>
#include <condition_variable>
#include <future>
#include <mutex>
@ -14,10 +13,8 @@
namespace DB
{
class AIOContextPool : public ext::singleton<AIOContextPool>
class AIOContextPool : private boost::noncopyable
{
friend class ext::singleton<AIOContextPool>;
static const auto max_concurrent_events = 128;
static const auto timeout_sec = 1;
@ -45,6 +42,8 @@ class AIOContextPool : public ext::singleton<AIOContextPool>
void reportExceptionToAnyProducer();
public:
static AIOContextPool & instance();
/// Request AIO read operation for iocb, returns a future with number of bytes read
std::future<BytesRead> post(struct iocb & iocb);
};
View File
@ -0,0 +1,16 @@
#include <IO/DoubleConverter.h>
namespace DB
{
template <bool emit_decimal_point>
const double_conversion::DoubleToStringConverter & DoubleConverter<emit_decimal_point>::instance()
{
static const double_conversion::DoubleToStringConverter instance{
DoubleToStringConverterFlags<emit_decimal_point>::flags, "inf", "nan", 'e', -6, 21, 6, 1};
return instance;
}
template class DoubleConverter<true>;
template class DoubleConverter<false>;
}
View File
@ -6,6 +6,7 @@
#endif
#include <double-conversion/double-conversion.h>
#include <boost/noncopyable.hpp>
#ifdef __clang__
#pragma clang diagnostic pop
@ -26,7 +27,7 @@ template <> struct DoubleToStringConverterFlags<true>
};
template <bool emit_decimal_point>
class DoubleConverter
class DoubleConverter : private boost::noncopyable
{
DoubleConverter(const DoubleConverter &) = delete;
DoubleConverter & operator=(const DoubleConverter &) = delete;
@ -41,14 +42,7 @@ public:
1 + double_conversion::DoubleToStringConverter::kMaxFixedDigitsAfterPoint + 1;
using BufferType = char[MAX_REPRESENTATION_LENGTH];
static const auto & instance()
{
static const double_conversion::DoubleToStringConverter instance{
DoubleToStringConverterFlags<emit_decimal_point>::flags, "inf", "nan", 'e', -6, 21, 6, 1
};
return instance;
}
static const double_conversion::DoubleToStringConverter & instance();
};
}
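Here `instance()` moves out of the header, and `template class DoubleConverter<true>; template class DoubleConverter<false>;` pins both instantiations (and their function-local statics) into a single translation unit. A self-contained sketch of the idiom, collapsed into one file with hypothetical names:

```cpp
#include <iostream>

// Header part: the static accessor is only declared for the template...
template <bool Flag>
struct Converter
{
    static const int & instance();
};

// Source part: ...and defined out of line; explicit instantiation guarantees
// the definition exists in exactly one translation unit.
template <bool Flag>
const int & Converter<Flag>::instance()
{
    static const int value = Flag ? 1 : 0;
    return value;
}

template struct Converter<true>;   // explicit instantiations, as in the commit
template struct Converter<false>;

int main()
{
    std::cout << Converter<true>::instance() << Converter<false>::instance() << '\n';  // prints 10
}
```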
View File
@ -112,15 +112,13 @@ void setTimeouts(Poco::Net::HTTPClientSession & session, const ConnectionTimeout
}
};
class HTTPSessionPool : public ext::singleton<HTTPSessionPool>
class HTTPSessionPool : private boost::noncopyable
{
private:
using Key = std::tuple<std::string, UInt16, bool>;
using PoolPtr = std::shared_ptr<SingleEndpointHTTPSessionPool>;
using Entry = SingleEndpointHTTPSessionPool::Entry;
friend class ext::singleton<HTTPSessionPool>;
struct Hasher
{
size_t operator()(const Key & k) const
@ -140,6 +138,12 @@ void setTimeouts(Poco::Net::HTTPClientSession & session, const ConnectionTimeout
HTTPSessionPool() = default;
public:
static auto & instance()
{
static HTTPSessionPool instance;
return instance;
}
Entry getSession(
const Poco::URI & uri,
const ConnectionTimeouts & timeouts,
View File
@ -83,4 +83,5 @@ public:
return IHashingBuffer<WriteBuffer>::getHash();
}
};
}
View File
@ -930,15 +930,15 @@ void NO_INLINE Aggregator::convertToBlockImplFinal(
}
}
for (const auto & value : data)
data.forEachValue([&](const auto & key, auto & mapped)
{
method.insertKeyIntoColumns(value.getValue(), key_columns, key_sizes);
method.insertKeyIntoColumns(key, key_columns, key_sizes);
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->insertResultInto(
value.getSecond() + offsets_of_aggregate_states[i],
mapped + offsets_of_aggregate_states[i],
*final_aggregate_columns[i]);
}
});
destroyImpl<Method>(data);
}
@ -961,16 +961,16 @@ void NO_INLINE Aggregator::convertToBlockImplNotFinal(
}
}
for (auto & value : data)
data.forEachValue([&](const auto & key, auto & mapped)
{
method.insertKeyIntoColumns(value.getValue(), key_columns, key_sizes);
method.insertKeyIntoColumns(key, key_columns, key_sizes);
/// reserved, so push_back does not throw exceptions
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_columns[i]->push_back(value.getSecond() + offsets_of_aggregate_states[i]);
aggregate_columns[i]->push_back(mapped + offsets_of_aggregate_states[i]);
value.getSecond() = nullptr;
}
mapped = nullptr;
});
}
@ -1303,32 +1303,27 @@ void NO_INLINE Aggregator::mergeDataImpl(
if constexpr (Method::low_cardinality_optimization)
mergeDataNullKey<Method, Table>(table_dst, table_src, arena);
for (auto it = table_src.begin(), end = table_src.end(); it != end; ++it)
table_src.mergeToViaEmplace(table_dst,
[&](AggregateDataPtr & dst, AggregateDataPtr & src, bool inserted)
{
typename Table::iterator res_it;
bool inserted;
table_dst.emplace(it->getFirst(), res_it, inserted, it.getHash());
if (!inserted)
{
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->merge(
res_it->getSecond() + offsets_of_aggregate_states[i],
it->getSecond() + offsets_of_aggregate_states[i],
dst + offsets_of_aggregate_states[i],
src + offsets_of_aggregate_states[i],
arena);
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->destroy(
it->getSecond() + offsets_of_aggregate_states[i]);
aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
}
else
{
res_it->getSecond() = it->getSecond();
dst = src;
}
it->getSecond() = nullptr;
}
src = nullptr;
});
table_src.clearAndShrink();
}
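The iterator loop is replaced by a callback handed to the hash table. A sketch of the `mergeToViaEmplace` contract assumed by the new code, modeled on `std::unordered_map` (the ClickHouse `HashTable` API differs in details):

```cpp
#include <iostream>
#include <string>
#include <unordered_map>

// For every key in src, emplace a default slot into dst and hand both mapped
// values plus an "inserted" flag to the callback - mirroring the
// (dst, src, inserted) lambda in mergeDataImpl above.
template <typename Map, typename Func>
void mergeToViaEmplace(Map & src, Map & dst, Func && func)
{
    for (auto & [key, mapped] : src)
    {
        auto [it, inserted] = dst.try_emplace(key);
        func(it->second, mapped, inserted);
    }
}

int main()
{
    std::unordered_map<std::string, int> src{{"a", 1}, {"b", 2}};
    std::unordered_map<std::string, int> dst{{"a", 10}};
    mergeToViaEmplace(src, dst, [](int & dst_val, int & src_val, bool inserted)
    {
        if (inserted)
            dst_val = src_val;   // new key: move the state over
        else
            dst_val += src_val;  // existing key: merge the states
    });
    std::cout << dst["a"] << ' ' << dst["b"] << '\n';  // prints 11 2
}
```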
@ -1344,26 +1339,21 @@ void NO_INLINE Aggregator::mergeDataNoMoreKeysImpl(
if constexpr (Method::low_cardinality_optimization)
mergeDataNullKey<Method, Table>(table_dst, table_src, arena);
for (auto it = table_src.begin(), end = table_src.end(); it != end; ++it)
table_src.mergeToViaFind(table_dst, [&](AggregateDataPtr dst, AggregateDataPtr & src, bool found)
{
typename Table::iterator res_it = table_dst.find(it->getFirst(), it.getHash());
AggregateDataPtr res_data = table_dst.end() == res_it
? overflows
: res_it->getSecond();
AggregateDataPtr res_data = found ? dst : overflows;
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->merge(
res_data + offsets_of_aggregate_states[i],
it->getSecond() + offsets_of_aggregate_states[i],
src + offsets_of_aggregate_states[i],
arena);
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->destroy(it->getSecond() + offsets_of_aggregate_states[i]);
it->getSecond() = nullptr;
}
aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
src = nullptr;
});
table_src.clearAndShrink();
}
@ -1377,27 +1367,23 @@ void NO_INLINE Aggregator::mergeDataOnlyExistingKeysImpl(
if constexpr (Method::low_cardinality_optimization)
mergeDataNullKey<Method, Table>(table_dst, table_src, arena);
for (auto it = table_src.begin(); it != table_src.end(); ++it)
table_src.mergeToViaFind(table_dst,
[&](AggregateDataPtr dst, AggregateDataPtr & src, bool found)
{
decltype(it) res_it = table_dst.find(it->getFirst(), it.getHash());
if (table_dst.end() == res_it)
continue;
AggregateDataPtr res_data = res_it->getSecond();
if (!found)
return;
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->merge(
res_data + offsets_of_aggregate_states[i],
it->getSecond() + offsets_of_aggregate_states[i],
dst + offsets_of_aggregate_states[i],
src + offsets_of_aggregate_states[i],
arena);
for (size_t i = 0; i < params.aggregates_size; ++i)
aggregate_functions[i]->destroy(it->getSecond() + offsets_of_aggregate_states[i]);
it->getSecond() = nullptr;
}
aggregate_functions[i]->destroy(src + offsets_of_aggregate_states[i]);
src = nullptr;
});
table_src.clearAndShrink();
}
@ -2255,23 +2241,21 @@ std::vector<Block> Aggregator::convertBlockToTwoLevel(const Block & block)
template <typename Method, typename Table>
void NO_INLINE Aggregator::destroyImpl(Table & table) const
{
for (auto elem : table)
table.forEachMapped([&](AggregateDataPtr & data)
{
AggregateDataPtr & data = elem.getSecond();
/** If an exception (usually a lack of memory, the MemoryTracker throws) arose
* after inserting the key into a hash table, but before creating all states of aggregate functions,
* then data will be equal nullptr.
*/
if (nullptr == data)
continue;
return;
for (size_t i = 0; i < params.aggregates_size; ++i)
if (!aggregate_functions[i]->isState())
aggregate_functions[i]->destroy(data + offsets_of_aggregate_states[i]);
data = nullptr;
}
});
}
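Likewise, `convertToBlockImpl*` and `destroyImpl` now traverse the table via `forEachValue`/`forEachMapped` instead of range-for. A sketch of those traversal helpers, again with `std::unordered_map` standing in for the ClickHouse hash table:

```cpp
#include <iostream>
#include <string>
#include <unordered_map>

// forEachValue hands (key, mapped) pairs to the callback; forEachMapped hands
// only the mapped part - matching the lambdas used in the hunks above.
template <typename Map, typename Func>
void forEachValue(Map & map, Func && func)
{
    for (auto & [key, mapped] : map)
        func(key, mapped);
}

template <typename Map, typename Func>
void forEachMapped(Map & map, Func && func)
{
    for (auto & pair : map)
        func(pair.second);
}

int main()
{
    std::unordered_map<std::string, int> data{{"a", 1}, {"b", 2}};
    forEachValue(data, [](const auto & key, int & mapped) { std::cout << key << '=' << mapped << ' '; });
    forEachMapped(data, [](int & mapped) { mapped = 0; });  // e.g. "destroy" every state
    std::cout << '\n' << data["a"] << data["b"] << '\n';    // prints 00
}
```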
View File
@ -182,9 +182,11 @@ struct AggregationMethodOneNumber
static const bool low_cardinality_optimization = false;
// Insert the key from the hash table into columns.
static void insertKeyIntoColumns(const typename Data::value_type & value, MutableColumns & key_columns, const Sizes & /*key_sizes*/)
static void insertKeyIntoColumns(const Key & key, MutableColumns & key_columns, const Sizes & /*key_sizes*/)
{
static_cast<ColumnVectorHelper *>(key_columns[0].get())->insertRawData<sizeof(FieldType)>(reinterpret_cast<const char *>(&value.first));
auto key_holder = reinterpret_cast<const char *>(&key);
auto column = static_cast<ColumnVectorHelper *>(key_columns[0].get());
column->insertRawData<sizeof(FieldType)>(key_holder);
}
};
@ -208,9 +210,9 @@ struct AggregationMethodString
static const bool low_cardinality_optimization = false;
static void insertKeyIntoColumns(const typename Data::value_type & value, MutableColumns & key_columns, const Sizes &)
static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &)
{
key_columns[0]->insertData(value.first.data, value.first.size);
key_columns[0]->insertData(key.data, key.size);
}
};
@ -234,9 +236,9 @@ struct AggregationMethodFixedString
static const bool low_cardinality_optimization = false;
static void insertKeyIntoColumns(const typename Data::value_type & value, MutableColumns & key_columns, const Sizes &)
static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &)
{
key_columns[0]->insertData(value.first.data, value.first.size);
key_columns[0]->insertData(key.data, key.size);
}
};
@ -262,10 +264,19 @@ struct AggregationMethodSingleLowCardinalityColumn : public SingleColumnMethod
static const bool low_cardinality_optimization = true;
static void insertKeyIntoColumns(const typename Data::value_type & value, MutableColumns & key_columns_low_cardinality, const Sizes & /*key_sizes*/)
static void insertKeyIntoColumns(const Key & key,
MutableColumns & key_columns_low_cardinality, const Sizes & /*key_sizes*/)
{
auto ref = BaseState::getValueRef(value);
assert_cast<ColumnLowCardinality *>(key_columns_low_cardinality[0].get())->insertData(ref.data, ref.size);
auto col = assert_cast<ColumnLowCardinality *>(key_columns_low_cardinality[0].get());
if constexpr (std::is_same_v<Key, StringRef>)
{
col->insertData(key.data, key.size);
}
else
{
col->insertData(reinterpret_cast<const char *>(&key), sizeof(key));
}
}
};
@ -293,7 +304,7 @@ struct AggregationMethodKeysFixed
static const bool low_cardinality_optimization = false;
static void insertKeyIntoColumns(const typename Data::value_type & value, MutableColumns & key_columns, const Sizes & key_sizes)
static void insertKeyIntoColumns(const Key & key, MutableColumns & key_columns, const Sizes & key_sizes)
{
size_t keys_size = key_columns.size();
@ -330,7 +341,7 @@ struct AggregationMethodKeysFixed
/// corresponding key is nullable. Update the null map accordingly.
size_t bucket = i / 8;
size_t offset = i % 8;
UInt8 val = (reinterpret_cast<const UInt8 *>(&value.first)[bucket] >> offset) & 1;
UInt8 val = (reinterpret_cast<const UInt8 *>(&key)[bucket] >> offset) & 1;
null_map->insertValue(val);
is_null = val == 1;
}
@ -340,7 +351,7 @@ struct AggregationMethodKeysFixed
else
{
size_t size = key_sizes[i];
observed_column->insertData(reinterpret_cast<const char *>(&value.first) + pos, size);
observed_column->insertData(reinterpret_cast<const char *>(&key) + pos, size);
pos += size;
}
}
@ -371,9 +382,9 @@ struct AggregationMethodSerialized
static const bool low_cardinality_optimization = false;
static void insertKeyIntoColumns(const typename Data::value_type & value, MutableColumns & key_columns, const Sizes &)
static void insertKeyIntoColumns(const StringRef & key, MutableColumns & key_columns, const Sizes &)
{
auto pos = value.first.data;
auto pos = key.data;
for (auto & column : key_columns)
pos = column->deserializeAndInsertFromArena(pos);
}
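With `insertKeyIntoColumns` now receiving the bare key instead of the hash table's `value_type`, the low-cardinality method dispatches on the key type with `if constexpr`. A reduced sketch of that dispatch, with `std::string_view` standing in for `StringRef` and a plain `std::string` standing in for a column:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <string_view>
#include <type_traits>

// String-like keys copy the bytes they point to; POD keys copy their raw
// object representation - the same two branches as in the hunk above.
template <typename Key>
void insertKeyIntoColumn(const Key & key, std::string & column)
{
    if constexpr (std::is_same_v<Key, std::string_view>)
        column.append(key.data(), key.size());
    else
        column.append(reinterpret_cast<const char *>(&key), sizeof(key));
}

int main()
{
    std::string column;
    insertKeyIntoColumn(std::string_view{"abc"}, column);  // 3 bytes
    insertKeyIntoColumn(std::uint64_t{42}, column);        // 8 bytes
    std::cout << column.size() << '\n';                    // prints 11
}
```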
View File
@ -38,6 +38,7 @@
#include <Processors/Transforms/LimitsCheckingTransform.h>
#include <Processors/Transforms/MaterializingTransform.h>
#include <Processors/Formats/IOutputFormat.h>
#include <Parsers/ASTWatchQuery.h>
namespace ProfileEvents
{
@ -660,7 +661,19 @@ void executeQuery(
if (set_query_id)
set_query_id(context.getClientInfo().current_query_id);
copyData(*streams.in, *out);
if (ast->as<ASTWatchQuery>())
{
/// For a WATCH query, flush data if the block is empty (to send data to the client).
auto flush_callback = [&out](const Block & block)
{
if (block.rows() == 0)
out->flush();
};
copyData(*streams.in, *out, [](){ return false; }, std::move(flush_callback));
}
else
copyData(*streams.in, *out);
}
if (pipeline.initialized())
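executeQuery now passes a cancel predicate and a flush callback into `copyData`, so WATCH queries can push empty heartbeat blocks to the client. A self-contained sketch of that control flow (`Block` and the stream are simplified stand-ins for the real types):

```cpp
#include <functional>
#include <iostream>
#include <vector>

struct Block { size_t rows = 0; };  // stand-in for DB::Block

// Simplified copyData: stop when cancelled, report every block to `progress`.
void copyData(const std::vector<Block> & in,
              const std::function<bool()> & is_cancelled,
              const std::function<void(const Block &)> & progress)
{
    for (const auto & block : in)
    {
        if (is_cancelled())
            break;
        progress(block);
    }
}

int main()
{
    std::vector<Block> stream{{5}, {0}, {7}};  // an empty block in the middle
    copyData(stream, [] { return false; }, [](const Block & block)
    {
        if (block.rows == 0)
            std::cout << "flush\n";  // WATCH: empty block -> flush to the client
    });
}
```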
View File
@ -1,7 +1,7 @@
#include "config_formats.h"
#include "ArrowColumnToCHColumn.h"
#if USE_ORC or USE_PARQUET
#if USE_ORC || USE_PARQUET
#include <DataTypes/DataTypeFactory.h>
#include <DataTypes/DataTypeNullable.h>
#include <DataTypes/DataTypeString.h>
View File
@ -1,6 +1,6 @@
#include "config_formats.h"
#if USE_ORC or USE_PARQUET
#if USE_ORC || USE_PARQUET
#include <DataTypes/IDataType.h>
#include <DataTypes/DataTypesNumber.h>
View File
@ -1,3 +1,5 @@
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Processors/Formats/Impl/MySQLOutputFormat.h>
#include <Core/MySQLProtocol.h>
@ -116,3 +118,4 @@ void registerOutputFormatProcessorMySQLWrite(FormatFactory & factory)
}
}
#endif
View File
@ -1,4 +1,6 @@
#pragma once
#include <Common/config.h>
#if USE_POCO_NETSSL
#include <Processors/Formats/IRowOutputFormat.h>
#include <Core/Block.h>
@ -40,3 +42,4 @@ private:
};
}
#endif
View File
@ -72,9 +72,7 @@ void ReadBufferFromKafkaConsumer::commit()
PrintOffsets("Polled offset", consumer->get_offsets_position(consumer->get_assignment()));
/// Since we can poll more messages than we already processed - commit only processed messages.
if (!messages.empty())
consumer->async_commit(*std::prev(current));
consumer->async_commit();
PrintOffsets("Committed offset", consumer->get_offsets_committed(consumer->get_assignment()));
@ -152,8 +150,6 @@ bool ReadBufferFromKafkaConsumer::nextImpl()
return true;
}
put_delimiter = (delimiter != 0);
if (current == messages.end())
{
if (intermediate_commit)
@ -185,6 +181,10 @@ bool ReadBufferFromKafkaConsumer::nextImpl()
// XXX: very fishy place with const casting.
auto new_position = reinterpret_cast<char *>(const_cast<unsigned char *>(current->get_payload().get_data()));
BufferBase::set(new_position, current->get_payload().get_size(), 0);
put_delimiter = (delimiter != 0);
/// Since we can poll more messages than we already processed - commit only processed messages.
consumer->store_offset(*current);
++current;
View File
@ -261,9 +261,10 @@ ConsumerBufferPtr StorageKafka::createReadBuffer()
conf.set("metadata.broker.list", brokers);
conf.set("group.id", group);
conf.set("client.id", VERSION_FULL);
conf.set("auto.offset.reset", "smallest"); // If no offset stored for this group, read all messages from the start
conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished
conf.set("enable.partition.eof", "false"); // Ignore EOF messages
conf.set("auto.offset.reset", "smallest"); // If no offset stored for this group, read all messages from the start
conf.set("enable.auto.commit", "false"); // We manually commit offsets after a stream successfully finished
conf.set("enable.auto.offset.store", "false"); // Update offset automatically - to commit them all at once.
conf.set("enable.partition.eof", "false"); // Ignore EOF messages
updateConfiguration(conf);
// Create a consumer and subscribe to topics
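With `enable.auto.offset.store` set to false, the consumer stores only the offsets of messages it has actually processed and then commits them in one batch, matching the `store_offset`/`async_commit` pair above. A hedged end-to-end sketch with cppkafka (broker, topic, and group are placeholders):

```cpp
#include <cppkafka/cppkafka.h>
#include <iostream>

int main()
{
    cppkafka::Configuration config = {
        {"metadata.broker.list", "localhost:9092"},
        {"group.id", "example"},
        {"auto.offset.reset", "smallest"},
        {"enable.auto.commit", "false"},        // we commit manually
        {"enable.auto.offset.store", "false"},  // we store offsets manually
        {"enable.partition.eof", "false"}};

    cppkafka::Consumer consumer(config);
    consumer.subscribe({"events"});

    for (int i = 0; i < 10; ++i)
    {
        cppkafka::Message msg = consumer.poll();
        if (!msg || msg.get_error())
            continue;
        std::cout << msg.get_payload() << '\n';  // "process" the message
        consumer.store_offset(msg);              // only processed messages get their offset stored
    }
    consumer.async_commit();                     // commit all stored offsets at once
}
```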
View File
@ -86,4 +86,10 @@ MergeTreeIndexFactory::MergeTreeIndexFactory()
registerIndex("bloom_filter", bloomFilterIndexCreatorNew);
}
MergeTreeIndexFactory & MergeTreeIndexFactory::instance()
{
static MergeTreeIndexFactory instance;
return instance;
}
}
View File
@ -5,7 +5,6 @@
#include <vector>
#include <memory>
#include <Core/Block.h>
#include <ext/singleton.h>
#include <Storages/MergeTree/MergeTreeDataPartChecksum.h>
#include <Storages/SelectQueryInfo.h>
#include <Storages/MergeTree/MarkRange.h>
@ -129,11 +128,11 @@ public:
using MergeTreeIndices = std::vector<MutableMergeTreeIndexPtr>;
class MergeTreeIndexFactory : public ext::singleton<MergeTreeIndexFactory>
class MergeTreeIndexFactory : private boost::noncopyable
{
friend class ext::singleton<MergeTreeIndexFactory>;
public:
static MergeTreeIndexFactory & instance();
using Creator = std::function<
std::unique_ptr<IMergeTreeIndex>(
const NamesAndTypesList & columns,
View File
@ -163,4 +163,10 @@ StoragePtr StorageFactory::get(
return it->second(arguments);
}
StorageFactory & StorageFactory::instance()
{
static StorageFactory ret;
return ret;
}
}
View File
@ -5,7 +5,6 @@
#include <Storages/ColumnsDescription.h>
#include <Storages/ConstraintsDescription.h>
#include <Storages/IStorage_fwd.h>
#include <ext/singleton.h>
#include <unordered_map>
@ -21,9 +20,12 @@ class ASTStorage;
* In 'columns' Nested data structures must be flattened.
* You should subsequently call IStorage::startup method to work with table.
*/
class StorageFactory : public ext::singleton<StorageFactory>, public IHints<1, StorageFactory>
class StorageFactory : private boost::noncopyable, public IHints<1, StorageFactory>
{
public:
static StorageFactory & instance();
struct Arguments
{
const String & engine_name;
View File
@ -70,4 +70,10 @@ bool TableFunctionFactory::isTableFunctionName(const std::string & name) const
return table_functions.count(name);
}
TableFunctionFactory & TableFunctionFactory::instance()
{
static TableFunctionFactory ret;
return ret;
}
}
View File
@ -4,12 +4,12 @@
#include <Common/IFactoryWithAliases.h>
#include <Common/NamePrompter.h>
#include <ext/singleton.h>
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <boost/noncopyable.hpp>
namespace DB
@ -21,10 +21,12 @@ using TableFunctionCreator = std::function<TableFunctionPtr()>;
/** Lets you get a table function by its name.
*/
class TableFunctionFactory final: public ext::singleton<TableFunctionFactory>, public IFactoryWithAliases<TableFunctionCreator>
class TableFunctionFactory final: private boost::noncopyable, public IFactoryWithAliases<TableFunctionCreator>
{
public:
static TableFunctionFactory & instance();
/// Register a function by its name.
/// No locking, you must register all functions before usage of get.
void registerFunction(const std::string & name, Creator creator, CaseSensitiveness case_sensitiveness = CaseSensitive);
View File
@ -248,6 +248,21 @@ def test_kafka_tsv_with_delimiter(kafka_cluster):
kafka_check_result(result, True)
@pytest.mark.timeout(180)
def test_kafka_select_empty(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
@pytest.mark.timeout(180)
def test_kafka_json_without_delimiter(kafka_cluster):
instance.query('''
View File
@ -0,0 +1,2 @@
0
10
View File
@ -0,0 +1,5 @@
drop table if exists tab;
create table tab (x UInt64) engine = MergeTree order by tuple();
insert into tab select number as n from numbers(20) any inner join (select number * 10 as n from numbers(2)) using(n) settings any_join_distinct_right_table_keys = 1, max_block_size = 5;
select * from tab order by x;
View File
@ -0,0 +1,24 @@
SET any_join_distinct_right_table_keys = 1;
DROP TABLE IF EXISTS test_insert_t1;
DROP TABLE IF EXISTS test_insert_t2;
DROP TABLE IF EXISTS test_insert_t3;
CREATE TABLE test_insert_t1 (`dt` Date, `uid` String, `name` String, `city` String) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY name SETTINGS index_granularity = 8192;
CREATE TABLE test_insert_t2 (`dt` Date, `uid` String) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY uid SETTINGS index_granularity = 8192;
CREATE TABLE test_insert_t3 (`dt` Date, `uid` String, `name` String, `city` String) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY name SETTINGS index_granularity = 8192;
INSERT INTO test_insert_t1 SELECT '2019-09-01',toString(number),toString(rand()),toString(rand()) FROM system.numbers WHERE number > 10 limit 1000000;
INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=0 limit 200;
INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=100000 limit 200;
INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=300000 limit 200;
INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=500000 limit 200;
INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=700000 limit 200;
INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=900000 limit 200;
INSERT INTO test_insert_t3 SELECT '2019-09-01', uid, name, city FROM ( SELECT dt, uid, name, city FROM test_insert_t1 WHERE dt = '2019-09-01') t1 GLOBAL ANY INNER JOIN (SELECT uid FROM test_insert_t2 WHERE dt = '2019-09-01') t2 ON t1.uid=t2.uid;
SELECT count(*) FROM test_insert_t3;
DROP TABLE test_insert_t1;
DROP TABLE test_insert_t2;
DROP TABLE test_insert_t3;
View File
@ -29,6 +29,7 @@ The supported formats are:
| [Protobuf](#protobuf) | ✔ | ✔ |
| [Parquet](#data-format-parquet) | ✔ | ✔ |
| [RowBinary](#rowbinary) | ✔ | ✔ |
| [RowBinaryWithNamesAndTypes](#rowbinarywithnamesandtypes) | ✔ | ✔ |
| [Native](#native) | ✔ | ✔ |
| [Null](#null) | ✗ | ✔ |
| [XML](#xml) | ✗ | ✔ |
@ -680,9 +681,10 @@ For [NULL](../query_language/syntax.md#null-literal) support, an additional byte
## RowBinaryWithNamesAndTypes {#rowbinarywithnamesandtypes}
Similar to [RowBinary](#rowbinary), but with added header:
* [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
* N `String`s specifying column names
* N `String`s specifying column types
* [LEB128](https://en.wikipedia.org/wiki/LEB128)-encoded number of columns (N)
* N `String`s specifying column names
* N `String`s specifying column types
## Values {#data-format-values}
@ -924,17 +926,17 @@ Data types of a ClickHouse table columns can differ from the corresponding field
You can insert Parquet data from a file into ClickHouse table by the following command:
```
```bash
cat {filename} | clickhouse-client --query="INSERT INTO {some_table} FORMAT Parquet"
```
You can select data from a ClickHouse table and save them into some file in the Parquet format by the following command:
```
```bash
clickhouse-client --query="SELECT * FROM {some_table} FORMAT Parquet" > {some_file.pq}
```
To exchange data with the Hadoop, you can use [`HDFS` table engine](../../operations/table_engines/hdfs.md).
To exchange data with Hadoop, you can use the [HDFS table engine](../operations/table_engines/hdfs.md).
## Format Schema {#formatschema}
View File
@ -69,6 +69,10 @@ Features:
- Pager support for the data output.
- Custom PostgreSQL-like commands.
### clickhouse-flamegraph
[clickhouse-flamegraph](https://github.com/Slach/clickhouse-flamegraph) is a specialized tool to visualize the `system.trace_log` as a [flamegraph](http://www.brendangregg.com/flamegraphs.html).
## Commercial
### DataGrip
View File
@ -857,6 +857,18 @@ Possible values:
Default value: 0.
## optimize_throw_if_noop {#setting-optimize_throw_if_noop}
Enables or disables throwing an exception if the [OPTIMIZE](../../query_language/misc.md#misc_operations-optimize) query didn't perform a merge.
By default, `OPTIMIZE` returns successfully even if it didn't do anything. This setting lets you distinguish that situation and get the reason in the exception message.
Possible values:
- 1 — Throwing an exception is enabled.
- 0 — Throwing an exception is disabled.
Default value: 0.
## distributed_replica_error_half_life {#settings-distributed_replica_error_half_life}
- Type: seconds
@ -882,4 +894,19 @@ Error count of each replica is capped at this value, preventing a single replica
- [Table engine Distributed](../../operations/table_engines/distributed.md)
- [`distributed_replica_error_half_life`](#settings-distributed_replica_error_half_life)
## os_thread_priority {#setting-os_thread_priority}
Sets the priority ([nice](https://en.wikipedia.org/wiki/Nice_(Unix))) for threads that execute queries. The OS scheduler considers this priority when choosing the next thread to run on each available CPU core.
!!! warning "Warning"
To use this setting, you need to set the `CAP_SYS_NICE` capability. The `clickhouse-server` package sets it up during installation. Some virtual environments don't allow you to set the `CAP_SYS_NICE` capability. In this case, `clickhouse-server` shows a message about it at startup.
Possible values:
You can set values in the `[-20, 19]` range.
Lower values mean higher priority. Threads with low `nice` priority values are executed more frequently than threads with high values. High values are preferable for long-running non-interactive queries because they can quickly give up resources in favour of short interactive queries when those arrive.
Default value: 0.
[Original article](https://clickhouse.yandex/docs/en/operations/settings/settings/) <!-- hide -->
View File
@ -62,11 +62,11 @@ Columns:
Please note that `errors_count` is updated once per query to the cluster, while `estimated_recovery_time` is recalculated on demand. So there can be a case of non-zero `errors_count` and zero `estimated_recovery_time`; the next query will then reset `errors_count` to zero and try to use the replica as if it had no errors.
** See also **
**See also**
- [Table engine Distributed](../../operations/table_engines/distributed.md)
- [distributed_replica_error_cap setting](../settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](../settings/settings.md#settings-distributed_replica_error_half_life)
- [Table engine Distributed](table_engines/distributed.md)
- [distributed_replica_error_cap setting](settings/settings.md#settings-distributed_replica_error_cap)
- [distributed_replica_error_half_life setting](settings/settings.md#settings-distributed_replica_error_half_life)
## system.columns
View File
@ -388,10 +388,66 @@ When the values in the column expire, ClickHouse replaces them with the default
The `TTL` clause can't be used for key columns.
Examples:
Creating a table with TTL
```sql
CREATE TABLE example_table
(
d DateTime,
a Int TTL d + INTERVAL 1 MONTH,
b Int TTL d + INTERVAL 1 MONTH,
c String
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d;
```
Adding TTL to a column of an existing table
```sql
ALTER TABLE example_table
MODIFY COLUMN
c String TTL d + INTERVAL 1 DAY;
```
Altering TTL of the column
```sql
ALTER TABLE example_table
MODIFY COLUMN
c String TTL d + INTERVAL 1 MONTH;
```
**Table TTL**
When data in a table expires, ClickHouse deletes all corresponding rows.
Examples:
Creating a table with TTL
```sql
CREATE TABLE example_table
(
d DateTime,
a Int
)
ENGINE = MergeTree
PARTITION BY toYYYYMM(d)
ORDER BY d
TTL d + INTERVAL 1 MONTH;
```
Altering TTL of the table
```sql
ALTER TABLE example_table
MODIFY TTL d + INTERVAL 1 DAY;
```
**Removing Data**
Data with an expired TTL is removed when ClickHouse merges data parts.
Some files were not shown because too many files have changed in this diff